diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 9df6656..73ee1b4 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -134,6 +134,7 @@
       HiveConf.ConfVars.METASTORE_CACHE_LEVEL2_TYPE,
       HiveConf.ConfVars.METASTORE_IDENTIFIER_FACTORY,
       HiveConf.ConfVars.METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK,
+      HiveConf.ConfVars.METASTORE_ALLOW_NON_SELECT_QUERIES,
       HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS,
       HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX,
       HiveConf.ConfVars.METASTORE_EVENT_LISTENERS,
@@ -450,6 +451,8 @@
     METASTORE_USE_LEGACY_VALUE_STRATEGY("datanucleus.rdbms.useLegacyNativeValueStrategy", true, ""),
     METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck", "LOG",
         "Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]"),
+    METASTORE_ALLOW_NON_SELECT_QUERIES("datanucleus.query.sql.allowAll", true,
+        "Pass datanucleus.query.sql.allowAll=true to the PersistenceManagerFactory so that direct SQL can run non-SELECT statements."),
     METASTORE_BATCH_RETRIEVE_MAX("hive.metastore.batch.retrieve.max", 300,
         "Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch. \n" +
         "The higher the number, the less the number of round trips is needed to the Hive metastore server, \n" +
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 33745e4..8d83476 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -5384,13 +5384,8 @@ public AggrStats get_aggr_stats_for(PartitionsStatsRequest request)
 
     @Override
     public boolean set_aggr_stats_for(SetPartitionsStatsRequest request)
-        throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException,
-        TException {
-      boolean ret = true;
-      for (ColumnStatistics colStats : request.getColStats()) {
-        ret = ret && update_partition_column_statistics(colStats);
-      }
-      return ret;
+        throws MetaException {
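+      // Hand the whole request to the RawStore so all partitions' column
+      // stats are written in one bulk operation, replacing the removed
+      // per-partition update loop.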
+      return getMS().setPartitionColumnStatistics(request);
     }
   }
 
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index 5a8591a..2e7f0b6 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -50,6 +50,7 @@
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -65,6 +66,12 @@
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.datanucleus.store.rdbms.query.ForwardQueryResult;
+import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
 
 import com.google.common.collect.Lists;
 
@@ -890,6 +897,159 @@ public ColumnStatistics getTableStats(
     return result;
   }
 
+  public boolean set_partition_column_statistics(SetPartitionsStatsRequest request)
+      throws MetaException {
+    // We assume that all ColumnStatistics entries belong to the same table in
+    // the same database; this holds because the request is produced by ANALYZE.
+    List<ColumnStatistics> colStats = request.getColStats();
+    ColumnStatistics first = colStats.get(0);
+    ColumnStatisticsDesc csd = first.getStatsDesc();
+    String dbName = csd.getDbName();
+    String tableName = csd.getTableName();
+
+    // map each partition name to its PART_ID
+    boolean doTrace = LOG.isDebugEnabled();
+    Map<String, Long> findpartid = new HashMap<String, Long>();
+    // get DB_ID
+    long start = doTrace ? System.nanoTime() : 0;
+    String qText = "select \"DB_ID\" from \"DBS\" where \"NAME\" = '" + dbName + "'";
+    Query query = pm.newQuery("javax.jdo.query.SQL", qText);
+    Object qResult = query.execute();
+    ForwardQueryResult fqr = (ForwardQueryResult) qResult;
+    long dbId = StatObjectConverter.extractSqlLong(fqr.get(0));
+    long queryTime = doTrace ? System.nanoTime() : 0;
+    timingTrace(doTrace, qText, start, queryTime);
+    // get TBL_ID
+    start = doTrace ? System.nanoTime() : 0;
+    qText = "select \"TBL_ID\" from \"TBLS\" where \"DB_ID\" = " + dbId
+        + " and \"TBL_NAME\" = '" + tableName + "'";
+    query = pm.newQuery("javax.jdo.query.SQL", qText);
+    qResult = query.execute();
+    fqr = (ForwardQueryResult) qResult;
+    long tblId = StatObjectConverter.extractSqlLong(fqr.get(0));
+    queryTime = doTrace ? System.nanoTime() : 0;
+    timingTrace(doTrace, qText, start, queryTime);
+    // get the partition names and ids
+    qText = "select \"PART_NAME\", \"PART_ID\" from \"PARTITIONS\" where \"TBL_ID\" = " + tblId;
+    start = doTrace ? System.nanoTime() : 0;
+    query = pm.newQuery("javax.jdo.query.SQL", qText);
+    qResult = query.execute();
+    queryTime = doTrace ? System.nanoTime() : 0;
+    timingTrace(doTrace, qText, start, queryTime);
+    List<Object[]> list = ensureList(qResult);
+    for (Object[] row : list) {
+      String partName = (String) row[0];
+      long partId = StatObjectConverter.extractSqlLong(row[1]);
+      findpartid.put(partName, partId);
+    }
+    // get the current maximum CS_ID
+    start = doTrace ? System.nanoTime() : 0;
+    qText = "select max(\"CS_ID\") from \"PART_COL_STATS\"";
+    query = pm.newQuery("javax.jdo.query.SQL", qText);
+    qResult = query.execute();
+    fqr = (ForwardQueryResult) qResult;
+    Long csidseq = StatObjectConverter.extractSqlLong(fqr.get(0));
+    if (csidseq == null) {
+      csidseq = (long) 0;
+    }
+    queryTime = doTrace ? System.nanoTime() : 0;
+    timingTrace(doTrace, qText, start, queryTime);
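+    // Note (assumption): new CS_ID values are allocated by incrementing the
+    // max("CS_ID") read above, which presumes no concurrent writer inserts
+    // into PART_COL_STATS before the bulk insert below commits.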
+    // build the rows for the bulk insert, remembering the partition and
+    // column names so the matching old rows can be deleted first
+    List<String> partNames = new ArrayList<String>();
+    List<String> colNames = new ArrayList<String>();
+    boolean getColName = true;
+    String insertValues = "";
+    for (ColumnStatistics colStat : colStats) {
+      ColumnStatisticsDesc colDesc = colStat.getStatsDesc();
+      String partName = colDesc.getPartName();
+      partNames.add(partName);
+      long partId = findpartid.get(partName);
+      long lastAnalyzed = colDesc.getLastAnalyzed();
+      List<ColumnStatisticsObj> objs = colStat.getStatsObj();
+      for (ColumnStatisticsObj obj : objs) {
+        String colName = obj.getColName();
+        if (getColName) {
+          colNames.add(colName);
+        }
+        String colType = obj.getColType();
+        ColumnStatisticsData statsData = obj.getStatsData();
+        String row = "" + (++csidseq) + ",'" + dbName + "','" + tableName + "','" + partName
+            + "', " + partId + ",'" + colName + "','" + colType + "',"
+            + prepareColStatsInsert(colType, statsData) + lastAnalyzed;
+        insertValues += "(" + row + "),";
+      }
+      // every partition carries the same column list, so collect it only once
+      getColName = false;
+    }
+    // bulk delete the old stats rows
+    qText = "delete from \"PART_COL_STATS\" where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "
+        + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")"
+        + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")";
+    start = doTrace ? System.nanoTime() : 0;
+    query = pm.newQuery("javax.jdo.query.SQL", qText);
+    qResult = query.executeWithArray(prepareParams(dbName, tableName, partNames, colNames));
+    queryTime = doTrace ? System.nanoTime() : 0;
+    timingTrace(doTrace, qText, start, queryTime);
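+    // Deleting the old rows and re-inserting below replaces existing stats
+    // for these (partition, column) pairs without relying on a
+    // database-specific UPSERT/MERGE dialect.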
+    // bulk insert all the new stats rows in one statement
+    start = doTrace ? System.nanoTime() : 0;
+    qText = "insert into \"PART_COL_STATS\" "
+        + "(\"CS_ID\",\"DB_NAME\",\"TABLE_NAME\",\"PARTITION_NAME\",\"PART_ID\"," + STATS_COLLIST
+        + ") VALUES " + insertValues.substring(0, insertValues.length() - 1);
+    query = pm.newQuery("javax.jdo.query.SQL", qText);
+    qResult = query.execute();
+    queryTime = doTrace ? System.nanoTime() : 0;
+    timingTrace(doTrace, qText, start, queryTime);
+
+    return true;
+  }
+
+  private String prepareColStatsInsert(String colType, ColumnStatisticsData statsData)
+      throws MetaException {
+    Map<String, String> map = new HashMap<String, String>();
+    if (colType.equals("boolean")) {
+      BooleanColumnStatsData boolStats = statsData.getBooleanStats();
+      map.put("\"NUM_TRUES\"", Long.toString(boolStats.getNumTrues()));
+      map.put("\"NUM_FALSES\"", Long.toString(boolStats.getNumFalses()));
+      map.put("\"NUM_NULLS\"", Long.toString(boolStats.getNumNulls()));
+    } else if (colType.equals("string") || colType.startsWith("varchar")
+        || colType.startsWith("char")) {
+      StringColumnStatsData stringStats = statsData.getStringStats();
+      map.put("\"NUM_NULLS\"", Long.toString(stringStats.getNumNulls()));
+      map.put("\"AVG_COL_LEN\"", Double.toString(stringStats.getAvgColLen()));
+      map.put("\"MAX_COL_LEN\"", Long.toString(stringStats.getMaxColLen()));
+      map.put("\"NUM_DISTINCTS\"", Long.toString(stringStats.getNumDVs()));
+    } else if (colType.equals("binary")) {
+      BinaryColumnStatsData binaryStats = statsData.getBinaryStats();
+      map.put("\"NUM_NULLS\"", Long.toString(binaryStats.getNumNulls()));
+      map.put("\"AVG_COL_LEN\"", Double.toString(binaryStats.getAvgColLen()));
+      map.put("\"MAX_COL_LEN\"", Long.toString(binaryStats.getMaxColLen()));
+    } else if (colType.equals("bigint") || colType.equals("int") || colType.equals("smallint")
+        || colType.equals("tinyint") || colType.equals("timestamp")) {
+      LongColumnStatsData longStats = statsData.getLongStats();
+      map.put("\"LONG_HIGH_VALUE\"", Long.toString(longStats.getHighValue()));
+      map.put("\"LONG_LOW_VALUE\"", Long.toString(longStats.getLowValue()));
+      map.put("\"NUM_NULLS\"", Long.toString(longStats.getNumNulls()));
+      map.put("\"NUM_DISTINCTS\"", Long.toString(longStats.getNumDVs()));
+    } else if (colType.equals("double") || colType.equals("float")) {
+      DoubleColumnStatsData doubleStats = statsData.getDoubleStats();
+      map.put("\"DOUBLE_HIGH_VALUE\"", Double.toString(doubleStats.getHighValue()));
+      map.put("\"DOUBLE_LOW_VALUE\"", Double.toString(doubleStats.getLowValue()));
+      map.put("\"NUM_NULLS\"", Long.toString(doubleStats.getNumNulls()));
+      map.put("\"NUM_DISTINCTS\"", Long.toString(doubleStats.getNumDVs()));
+    } else if (colType.startsWith("decimal")) {
+      DecimalColumnStatsData decimalStats = statsData.getDecimalStats();
+      map.put("\"BIG_DECIMAL_HIGH_VALUE\"", decimalStats.getHighValue().toString());
+      map.put("\"BIG_DECIMAL_LOW_VALUE\"", decimalStats.getLowValue().toString());
+      map.put("\"NUM_NULLS\"", Long.toString(decimalStats.getNumNulls()));
+      map.put("\"NUM_DISTINCTS\"", Long.toString(decimalStats.getNumDVs()));
+    } else {
+      throw new MetaException("column type " + colType + " is not supported");
+    }
+    // emit the collected values in STATS_COLLIST order, skipping the leading
+    // COLUMN_NAME/COLUMN_TYPE columns and the trailing LAST_ANALYZED column,
+    // which the caller fills in; columns absent from the map print as SQL NULL
+    String[] colNames = STATS_COLLIST.split(",");
+    String ret = "";
+    for (int index = 2; index < colNames.length - 1; index++) {
+      ret += map.get(colNames[index].trim()) + ",";
+    }
+    return ret;
+  }
+
   public AggrStats aggrColStatsForPartitions(String dbName, String tableName,
       List<String> partNames, List<String> colNames) throws MetaException {
     long partsFound = partsFoundForPartitions(dbName, tableName, partNames,
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 637a39a..43560ab 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -5862,6 +5862,21 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List<String> partVals)
     }
   }
 
+  @Override
+  public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request)
+      throws MetaException {
+    boolean committed = false;
+    try {
+      openTransaction();
+      boolean ret = directSql.set_partition_column_statistics(request);
+      committed = commitTransaction();
+      return ret;
+    } finally {
+      if (!committed) {
+        rollbackTransaction();
+      }
+    }
+  }
+
   private List<MTableColumnStatistics> getMTableColumnStatistics(
       Table table, List<String> colNames) throws MetaException {
     boolean committed = false;
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
index 5c5ed7f..d327272 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -556,5 +556,7 @@ public void dropFunction(String dbName, String funcName)
   public AggrStats get_aggr_stats_for(String dbName, String tblName,
     List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException;
+
+  public abstract boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws MetaException;
 }
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 5905efe..b7e83a4 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -726,4 +726,11 @@ public AggrStats get_aggr_stats_for(String dbName,
     return null;
   }
 
+  @Override
+  public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request)
+      throws MetaException {
+    // TODO Auto-generated method stub
+    return false;
+  }
+
 }
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 88b0791..e8ad144 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -742,6 +742,13 @@ public AggrStats get_aggr_stats_for(String dbName,
       throws MetaException {
     return null;
   }
+
+  @Override
+  public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request)
+      throws MetaException {
+    // TODO Auto-generated method stub
+    return false;
+  }
 }
diff --git a/ql/src/test/queries/clientpositive/analyze_tbl_part.q b/ql/src/test/queries/clientpositive/analyze_tbl_part.q
index 9040bd4..5d80f47 100644
--- a/ql/src/test/queries/clientpositive/analyze_tbl_part.q
+++ b/ql/src/test/queries/clientpositive/analyze_tbl_part.q
@@ -1,5 +1,7 @@
 set hive.stats.dbclass=jdbc:derby;
 
+drop table if exists src_stat_part;
+
 create table src_stat_part(key string, value string) partitioned by (partitionId int);
 
 insert overwrite table src_stat_part partition (partitionId=1)
@@ -12,8 +14,26 @@ ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for colum
 describe formatted src_stat_part.key PARTITION(partitionId=1);
 
+ALTER TABLE src_stat_part PARTITION(partitionId=1) UPDATE STATISTICS for column key SET ('numDVs'='11','avgColLen'='2.2');
+
+describe formatted src_stat_part.key PARTITION(partitionId=1);
+
+ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key, value;
+
+describe formatted src_stat_part.key PARTITION(partitionId=1);
+
+ALTER TABLE src_stat_part DROP IF EXISTS PARTITION(partitionId=2);
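+-- partitionId=2 is dropped here and re-created below, so the following
+-- ANALYZE statements exercise stats rewrites after the partition set changes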
+
+insert overwrite table src_stat_part partition (partitionId=4)
+select * from src1;
+
 ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key, value;
 
 describe formatted src_stat_part.key PARTITION(partitionId=1);
 
-describe formatted src_stat_part.value PARTITION(partitionId=2);
\ No newline at end of file
+insert overwrite table src_stat_part partition (partitionId=2)
+select * from src1;
+
+ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key, value;
+
+describe formatted src_stat_part.key PARTITION(partitionId=4);
diff --git a/ql/src/test/results/clientpositive/analyze_tbl_part.q.out b/ql/src/test/results/clientpositive/analyze_tbl_part.q.out
index 40b926c..4f13cf4 100644
--- a/ql/src/test/results/clientpositive/analyze_tbl_part.q.out
+++ b/ql/src/test/results/clientpositive/analyze_tbl_part.q.out
@@ -1,3 +1,7 @@
+PREHOOK: query: drop table if exists src_stat_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists src_stat_part
+POSTHOOK: type: DROPTABLE
 PREHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -51,6 +55,19 @@ POSTHOOK: Input: default@src_stat_part
 # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
 
 key string 0 14 1.72 3 from deserializer
+PREHOOK: query: ALTER TABLE src_stat_part PARTITION(partitionId=1) UPDATE STATISTICS for column key SET ('numDVs'='11','avgColLen'='2.2')
+PREHOOK: type: ALTERTABLE_UPDATEPARTSTATS
+POSTHOOK: query: ALTER TABLE src_stat_part PARTITION(partitionId=1) UPDATE STATISTICS for column key SET ('numDVs'='11','avgColLen'='2.2')
+POSTHOOK: type: ALTERTABLE_UPDATEPARTSTATS
+PREHOOK: query: describe formatted src_stat_part.key PARTITION(partitionId=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_stat_part
+POSTHOOK: query: describe formatted src_stat_part.key PARTITION(partitionId=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_stat_part
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+key string 0 11 2.2 3 from deserializer
 PREHOOK: query: ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_stat_part
@@ -72,12 +89,79 @@ POSTHOOK: Input: default@src_stat_part
 # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
 
 key string 0 14 1.72 3 from deserializer
-PREHOOK: query: describe formatted src_stat_part.value PARTITION(partitionId=2)
+PREHOOK: query: ALTER TABLE src_stat_part DROP IF EXISTS PARTITION(partitionId=2)
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@src_stat_part
+PREHOOK: Output: default@src_stat_part@partitionid=2
+POSTHOOK: query: ALTER TABLE src_stat_part DROP IF EXISTS PARTITION(partitionId=2)
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@src_stat_part
+POSTHOOK: Output: default@src_stat_part@partitionid=2
+PREHOOK: query: insert overwrite table src_stat_part partition (partitionId=4)
+select * from src1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_stat_part@partitionid=4
+POSTHOOK: query: insert overwrite table src_stat_part partition (partitionId=4)
+select * from src1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_stat_part@partitionid=4
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=4).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=4).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_stat_part
+PREHOOK: Input: default@src_stat_part@partitionid=1
+PREHOOK: Input: default@src_stat_part@partitionid=4
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_stat_part
+POSTHOOK: Input: default@src_stat_part@partitionid=1
+POSTHOOK: Input: default@src_stat_part@partitionid=4
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted src_stat_part.key PARTITION(partitionId=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_stat_part
+POSTHOOK: query: describe formatted src_stat_part.key PARTITION(partitionId=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_stat_part
+# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
+
+key string 0 14 1.72 3 from deserializer
+PREHOOK: query: insert overwrite table src_stat_part partition (partitionId=2)
+select * from src1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_stat_part@partitionid=2
+POSTHOOK: query: insert overwrite table src_stat_part partition (partitionId=2)
+select * from src1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_stat_part@partitionid=2
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_stat_part
+PREHOOK: Input: default@src_stat_part@partitionid=1
+PREHOOK: Input: default@src_stat_part@partitionid=2
+PREHOOK: Input: default@src_stat_part@partitionid=4
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_stat_part
+POSTHOOK: Input: default@src_stat_part@partitionid=1
+POSTHOOK: Input: default@src_stat_part@partitionid=2
+POSTHOOK: Input: default@src_stat_part@partitionid=4
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted src_stat_part.key PARTITION(partitionId=4)
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_stat_part
-POSTHOOK: query: describe formatted src_stat_part.value PARTITION(partitionId=2)
+POSTHOOK: query: describe formatted src_stat_part.key PARTITION(partitionId=4)
 POSTHOOK: type: DESCTABLE
 POSTHOOK: Input: default@src_stat_part
 # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment
 
-value string 0 14 4.92 7 from deserializer
+key string 0 14 1.72 3 from deserializer