diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml
index e194441..c3cbb89 100644
--- a/itests/qtest/pom.xml
+++ b/itests/qtest/pom.xml
@@ -36,7 +36,7 @@
false
false
- list_bucket_dml_10.q,input16_cc.q,scriptfile1.q,scriptfile1_win.q,bucket4.q,bucketmapjoin6.q,disable_merge_for_bucketing.q,reduce_deduplicate.q,smb_mapjoin_8.q,join1.q,groupby2.q,bucketizedhiveinputformat.q,bucketmapjoin7.q,optrstat_groupby.q,bucket_num_reducers.q,bucket5.q,load_fs2.q,bucket_num_reducers2.q,infer_bucket_sort_merge.q,infer_bucket_sort_reducers_power_two.q,infer_bucket_sort_dyn_part.q,infer_bucket_sort_bucketed_table.q,infer_bucket_sort_map_operators.q,infer_bucket_sort_num_buckets.q,leftsemijoin_mr.q,schemeAuthority.q,schemeAuthority2.q,truncate_column_buckets.q,remote_script.q,,load_hdfs_file_with_space_in_the_name.q,parallel_orderby.q,import_exported_table.q,stats_counter.q
+ stats_counter_partitioned.q,list_bucket_dml_10.q,input16_cc.q,scriptfile1.q,scriptfile1_win.q,bucket4.q,bucketmapjoin6.q,disable_merge_for_bucketing.q,reduce_deduplicate.q,smb_mapjoin_8.q,join1.q,groupby2.q,bucketizedhiveinputformat.q,bucketmapjoin7.q,optrstat_groupby.q,bucket_num_reducers.q,bucket5.q,load_fs2.q,bucket_num_reducers2.q,infer_bucket_sort_merge.q,infer_bucket_sort_reducers_power_two.q,infer_bucket_sort_dyn_part.q,infer_bucket_sort_bucketed_table.q,infer_bucket_sort_map_operators.q,infer_bucket_sort_num_buckets.q,leftsemijoin_mr.q,schemeAuthority.q,schemeAuthority2.q,truncate_column_buckets.q,remote_script.q,load_hdfs_file_with_space_in_the_name.q,parallel_orderby.q,import_exported_table.q,stats_counter.q
cluster_tasklog_retrieval.q,minimr_broken_pipe.q,mapreduce_stack_trace.q,mapreduce_stack_trace_turnoff.q,mapreduce_stack_trace_hadoop20.q,mapreduce_stack_trace_turnoff_hadoop20.q
add_part_exist.q,alter1.q,alter2.q,alter4.q,alter5.q,alter_rename_partition.q,alter_rename_partition_authorization.q,archive.q,archive_corrupt.q,archive_multi.q,archive_mr_1806.q,archive_multi_mr_1806.q,authorization_1.q,authorization_2.q,authorization_4.q,authorization_5.q,authorization_6.q,authorization_7.q,ba_table1.q,ba_table2.q,ba_table3.q,ba_table_udfs.q,binary_table_bincolserde.q,binary_table_colserde.q,cluster.q,columnarserde_create_shortcut.q,combine2.q,constant_prop.q,create_nested_type.q,create_or_replace_view.q,create_struct_table.q,create_union_table.q,database.q,database_location.q,database_properties.q,ddltime.q,describe_database_json.q,drop_database_removes_partition_dirs.q,escape1.q,escape2.q,exim_00_nonpart_empty.q,exim_01_nonpart.q,exim_02_00_part_empty.q,exim_02_part.q,exim_03_nonpart_over_compat.q,exim_04_all_part.q,exim_04_evolved_parts.q,exim_05_some_part.q,exim_06_one_part.q,exim_07_all_part_over_nonoverlap.q,exim_08_nonpart_rename.q,exim_09_part_spec_nonoverlap.q,exim_10_external_managed.q,exim_11_managed_external.q,exim_12_external_location.q,exim_13_managed_location.q,exim_14_managed_location_over_existing.q,exim_15_external_part.q,exim_16_part_external.q,exim_17_part_managed.q,exim_18_part_external.q,exim_19_00_part_external_location.q,exim_19_part_external_location.q,exim_20_part_managed_location.q,exim_21_export_authsuccess.q,exim_22_import_exist_authsuccess.q,exim_23_import_part_authsuccess.q,exim_24_import_nonexist_authsuccess.q,global_limit.q,groupby_complex_types.q,groupby_complex_types_multi_single_reducer.q,index_auth.q,index_auto.q,index_auto_empty.q,index_bitmap.q,index_bitmap1.q,index_bitmap2.q,index_bitmap3.q,index_bitmap_auto.q,index_bitmap_rc.q,index_compact.q,index_compact_1.q,index_compact_2.q,index_compact_3.q,index_stale_partitioned.q,init_file.q,input16.q,input16_cc.q,input46.q,input_columnarserde.q,input_dynamicserde.q,input_lazyserde.q,input_testxpath3.q,input_testxpath4.q,insert2_overwrite_partitions.q,insertexternal
1.q,join_thrift.q,lateral_view.q,load_binary_data.q,load_exist_part_authsuccess.q,load_nonpart_authsuccess.q,load_part_authsuccess.q,loadpart_err.q,lock1.q,lock2.q,lock3.q,lock4.q,merge_dynamic_partition.q,multi_insert.q,multi_insert_move_tasks_share_dependencies.q,null_column.q,ppd_clusterby.q,query_with_semi.q,rename_column.q,sample6.q,sample_islocalmode_hook.q,set_processor_namespaces.q,show_tables.q,source.q,split_sample.q,str_to_map.q,transform1.q,udaf_collect_set.q,udaf_context_ngrams.q,udaf_histogram_numeric.q,udaf_ngrams.q,udaf_percentile_approx.q,udf_array.q,udf_bitmap_and.q,udf_bitmap_or.q,udf_explode.q,udf_format_number.q,udf_map.q,udf_map_keys.q,udf_map_values.q,udf_max.q,udf_min.q,udf_named_struct.q,udf_percentile.q,udf_printf.q,udf_sentences.q,udf_sort_array.q,udf_split.q,udf_struct.q,udf_substr.q,udf_translate.q,udf_union.q,udf_xpath.q,udtf_stack.q,view.q,virtual_column.q
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index e7453c7..a993664 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -52,6 +52,7 @@
import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.SkewedColumnPositionPair;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
+import org.apache.hadoop.hive.ql.stats.CounterStatsPublisher;
import org.apache.hadoop.hive.ql.stats.StatsPublisher;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.SerDeStats;
@@ -974,15 +975,24 @@ private void publishStats() throws HiveException {
// construct the key(fileID) to insert into the intermediate stats table
if (fspKey == "") {
- // for non-partitioned/static partitioned table, the key for temp storage is
+ if (statsPublisher instanceof CounterStatsPublisher) {
+ key = conf.getTableInfo().getTableName() + spSpec;
+ } else {
+ // for non-partitioned/static partitioned table, the key for temp storage is
// common key prefix + static partition spec + taskID
String keyPrefix = Utilities.getHashedStatsPrefix(
conf.getStatsAggPrefix() + spSpec, conf.getMaxStatsKeyPrefixLength());
key = keyPrefix + taskID;
+ }
} else {
// for partitioned table, the key is
// common key prefix + static partition spec + DynamicPartSpec + taskID
- key = createKeyForStatsPublisher(taskID, spSpec, fspKey);
+ if (statsPublisher instanceof CounterStatsPublisher) {
+ key = conf.getStatsAggPrefix() + spSpec;
+ } else {
+ key = createKeyForStatsPublisher(taskID, spSpec, fspKey);
+ }
+
}
Map statsToPublish = new HashMap();
for (String statType : fspValue.stat.getStoredStats()) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
index 142af10..2b85240 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
@@ -29,8 +29,10 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -222,11 +224,20 @@ private int aggregateStats() {
}
// In case of a non-partitioned table, the key for stats temporary store is "rootDir"
if (statsAggregator != null) {
- String aggKey = Utilities.getHashedStatsPrefix(work.getAggKey(), maxPrefixLength);
+ String aggKey;
+ if(HiveConf.getVar(conf, ConfVars.HIVESTATSDBCLASS).equals("counter")) {
+ aggKey = table.getDbName()+"."+table.getTableName();
+ // there is no need to aggregate stats in this case, but this should also work.
+ // also check non-partitioned code path.
+ } else {
+ aggKey = Utilities.getHashedStatsPrefix(work.getAggKey(), maxPrefixLength);
+ }
updateStats(StatsSetupConst.statsRequireCompute, tblStats, statsAggregator, parameters,
aggKey, atomic);
statsAggregator.cleanUp(aggKey);
}
+
+
// The collectable stats for the aggregator needs to be cleared.
// For eg. if a file is being loaded, the old number of rows are not valid
else if (work.isClearAggregatorStats()) {
@@ -236,7 +247,7 @@ else if (work.isClearAggregatorStats()) {
}
}
}
-
+
// write table stats to metastore
parameters = tTable.getParameters();
for (String statType : StatsSetupConst.statsRequireCompute) {
@@ -278,16 +289,28 @@ else if (work.isClearAggregatorStats()) {
//
Statistics newPartStats = new Statistics();
- // In that case of a partition, the key for stats temporary store is
- // "rootDir/[dynamic_partition_specs/]%"
- String partitionID = Utilities.getHashedStatsPrefix(
- work.getAggKey() + Warehouse.makePartPath(partn.getSpec()), maxPrefixLength);
+ String partitionID;
+
+ if(HiveConf.getVar(conf, ConfVars.HIVESTATSDBCLASS).equals("counter")) {
+ partitionID = table.getTableName() +Path.SEPARATOR + Warehouse.makePartPath(partn.getSpec());
+ // there is no need to aggregate stats in this case, but this should also work.
+ // also check non-partitioned code path.
+ } else {
+ // In the case of a partition, the key for stats temporary store is
+ // "rootDir/[dynamic_partition_specs/]%"
+ partitionID = Utilities.getHashedStatsPrefix(
+ work.getAggKey() + Warehouse.makePartPath(partn.getSpec()), maxPrefixLength);
+ }
+
LOG.info("Stats aggregator : " + partitionID);
if (statsAggregator != null) {
updateStats(StatsSetupConst.statsRequireCompute, newPartStats, statsAggregator,
parameters, partitionID, atomic);
+ for (String stat : newPartStats.stats.keySet()) {
+ LOG.debug("Stat : " + stat + "\t" + newPartStats.stats.get(stat));
+ }
statsAggregator.cleanUp(partitionID);
} else {
for (String statType : StatsSetupConst.statsRequireCompute) {
@@ -303,7 +326,9 @@ else if (work.isClearAggregatorStats()) {
}
}
}
-
+ for (String stat : newPartStats.stats.keySet()) {
+ LOG.debug("Stat : " + stat + "\t" + newPartStats.stats.get(stat));
+ }
/**
* calculate fast statistics
*/
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
index 8d895f4..bad1383 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
@@ -28,6 +28,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
@@ -296,11 +298,20 @@ private void publishStats() throws HiveException {
conf.getStatsAggPrefix(), conf.getMaxStatsKeyPrefixLength());
key = keyPrefix + taskID;
} else {
- // In case of a partition, the key for temp storage is
- // "tableName + partitionSpecs + taskID"
- String keyPrefix = Utilities.getHashedStatsPrefix(
- conf.getStatsAggPrefix() + pspecs, conf.getMaxStatsKeyPrefixLength());
- key = keyPrefix + taskID;
+
+ if (HiveConf.getVar(hconf, ConfVars.HIVESTATSDBCLASS).equals("counter")) {
+ // Can we do something within CounterStatsPublisher to avoid this special logic
+ // in TSOp.
+ // also, do we need same changes in FileSinkOp?
+ // Also, check non-partitioned tables case.
+ key = Utilities.appendPathSeparator(conf.getStatsAggPrefix() + pspecs);
+ } else {
+ // In case of a partition, the key for temp storage is
+ // "tableName + partitionSpecs + taskID"
+ String keyPrefix = Utilities.getHashedStatsPrefix(
+ conf.getStatsAggPrefix() + pspecs, conf.getMaxStatsKeyPrefixLength());
+ key = keyPrefix + taskID;
+ }
}
for(String statType : stats.get(pspecs).getStoredStats()) {
statsToPublish.put(statType, Long.toString(stats.get(pspecs).getStat(statType)));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 365c22a..eaa3f05 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -2291,7 +2291,7 @@ public static String getHashedStatsPrefix(String statsPrefix, int maxPrefixLengt
return ret;
}
- private static String appendPathSeparator(String path) {
+ public static String appendPathSeparator(String path) {
if (!path.endsWith(Path.SEPARATOR)) {
path = path + Path.SEPARATOR;
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
index 0268f98..af729e6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
import org.apache.hadoop.hive.ql.io.rcfile.stats.PartialScanWork;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessor;
@@ -95,6 +96,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx,
StatsWork statsWork = new StatsWork(parseCtx.getQB().getParseInfo().getTableSpec());
statsWork.setAggKey(op.getConf().getStatsAggPrefix());
+ statsWork.setSourceTask((MapRedTask)currTask);
statsWork.setStatsReliable(
parseCtx.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
Task statsTask = TaskFactory.get(statsWork, parseCtx.getConf());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java
index 2cc2519..17f5d7a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hive.ql.stats;
import java.io.IOException;
+import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -26,6 +27,7 @@
import org.apache.hadoop.hive.ql.exec.mr.ExecDriver;
import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;
@@ -46,6 +48,7 @@ public boolean connect(Configuration hconf, MapRedTask sourceTask) {
counters = job.getCounters();
}
} catch (Exception e) {
+ LOG.error(e);
LOG.error("Failed to get Job instance for " + sourceTask.getJobID());
}
return counters != null;
@@ -57,13 +60,17 @@ private JobConf toJobConf(Configuration hconf) {
@Override
public String aggregateStats(String keyPrefix, String statType) {
- long value = 0;
- for (String groupName : counters.getGroupNames()) {
- if (groupName.startsWith(keyPrefix)) {
- value += counters.getGroup(groupName).getCounter(statType);
- }
- }
- return String.valueOf(value);
+ LOG.debug("key : " + keyPrefix + "\t" + statType);
+ for (String cntrName : counters.getGroupNames()) {
+ LOG.info("Counter grp name: " + cntrName);
+ Iterator cntItr = counters.getGroup(cntrName).iterator();
+ while(cntItr.hasNext()) {
+ Counter cntr = cntItr.next();
+ LOG.info("Counter value: " + cntr.getName() + "\t" + cntr.getValue());
+ }
+
+ }
+ return String.valueOf(counters.getGroup(keyPrefix).getCounter(statType));
}
@Override
diff --git a/ql/src/test/queries/clientpositive/stats_counter_partitioned.q b/ql/src/test/queries/clientpositive/stats_counter_partitioned.q
new file mode 100644
index 0000000..233627e
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/stats_counter_partitioned.q
@@ -0,0 +1,13 @@
+set hive.stats.dbclass=counter;
+set hive.stats.autogather=true;
+
+create table dummy (key string, value string) partitioned by (ds string, hr string);
+
+load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='12');
+load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='11');
+
+analyze table dummy partition (ds,hr) compute statistics;
+describe formatted dummy partition (ds='2008', hr='11');
+describe formatted dummy partition (ds='2008', hr='12');
+
+drop table dummy;
diff --git a/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out b/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out
new file mode 100644
index 0000000..4f3be33
--- /dev/null
+++ b/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out
@@ -0,0 +1,123 @@
+PREHOOK: query: create table dummy (key string, value string) partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table dummy (key string, value string) partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@dummy
+PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='12')
+PREHOOK: type: LOAD
+PREHOOK: Output: default@dummy
+POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='12')
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@dummy
+POSTHOOK: Output: default@dummy@ds=2008/hr=12
+PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='11')
+PREHOOK: type: LOAD
+PREHOOK: Output: default@dummy
+POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='11')
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@dummy
+POSTHOOK: Output: default@dummy@ds=2008/hr=11
+PREHOOK: query: analyze table dummy partition (ds,hr) compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dummy
+PREHOOK: Input: default@dummy@ds=2008/hr=11
+PREHOOK: Input: default@dummy@ds=2008/hr=12
+PREHOOK: Output: default@dummy
+PREHOOK: Output: default@dummy@ds=2008/hr=11
+PREHOOK: Output: default@dummy@ds=2008/hr=12
+POSTHOOK: query: analyze table dummy partition (ds,hr) compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dummy
+POSTHOOK: Input: default@dummy@ds=2008/hr=11
+POSTHOOK: Input: default@dummy@ds=2008/hr=12
+POSTHOOK: Output: default@dummy
+POSTHOOK: Output: default@dummy@ds=2008/hr=11
+POSTHOOK: Output: default@dummy@ds=2008/hr=12
+PREHOOK: query: describe formatted dummy partition (ds='2008', hr='11')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe formatted dummy partition (ds='2008', hr='11')
+POSTHOOK: type: DESCTABLE
+# col_name data_type comment
+
+key string None
+value string None
+
+# Partition Information
+# col_name data_type comment
+
+ds string None
+hr string None
+
+# Detailed Partition Information
+Partition Value: [2008, 11]
+Database: default
+Table: dummy
+#### A masked pattern was here ####
+Protect Mode: None
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ totalSize 5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: describe formatted dummy partition (ds='2008', hr='12')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe formatted dummy partition (ds='2008', hr='12')
+POSTHOOK: type: DESCTABLE
+# col_name data_type comment
+
+key string None
+value string None
+
+# Partition Information
+# col_name data_type comment
+
+ds string None
+hr string None
+
+# Detailed Partition Information
+Partition Value: [2008, 12]
+Database: default
+Table: dummy
+#### A masked pattern was here ####
+Protect Mode: None
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ totalSize 5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table dummy
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@dummy
+PREHOOK: Output: default@dummy
+POSTHOOK: query: drop table dummy
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@dummy
+POSTHOOK: Output: default@dummy