Index: eclipse-templates/.classpath._hbase
===================================================================
--- eclipse-templates/.classpath._hbase (revision 0)
+++ eclipse-templates/.classpath._hbase (revision 0)
@@ -0,0 +1,59 @@
+[59 lines of Eclipse .classpath XML entries; markup stripped during extraction, content not recoverable]
Index: eclipse-templates/TestHBaseCliDriver.launchtemplate
===================================================================
--- eclipse-templates/TestHBaseCliDriver.launchtemplate (revision 0)
+++ eclipse-templates/TestHBaseCliDriver.launchtemplate (revision 0)
@@ -0,0 +1,26 @@
+[26 lines of Eclipse launch-configuration XML; markup stripped during extraction, content not recoverable]
Index: .classpath._hbase
===================================================================
--- .classpath._hbase (revision 0)
+++ .classpath._hbase (revision 0)
@@ -0,0 +1,59 @@
+[59 lines of Eclipse .classpath XML entries; markup stripped during extraction, content not recoverable]
Index: conf/hive-default.xml
===================================================================
--- conf/hive-default.xml (revision 982011)
+++ conf/hive-default.xml (working copy)
@@ -594,6 +594,30 @@
+<property>
+  <name>hive.stats.dbclass</name>
+  <value>jdbc:derby</value>
+  <description>The default database that stores temporary hive statistics.</description>
+</property>
+
+<property>
+  <name>hive.stats.autogather</name>
+  <value>true</value>
+  <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
+</property>
+
+<property>
+  <name>hive.stats.jdbcdriver</name>
+  <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+  <description>The JDBC driver for the database that stores temporary hive statistics.</description>
+</property>
+
+<property>
+  <name>hive.stats.dbconnectionstring</name>
+  <value>jdbc:derby:;databaseName=TempStatsStore;create=true</value>
+  <description>The default connection string for the database that stores temporary hive statistics.</description>
+</property>
+
 <property>
   <name>fs.har.impl</name>
   <value>org.apache.hadoop.hive.shims.HiveHarFileSystem</value>
   <description>The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop vers less than 0.20</description>
 </property>
Index: hbase-handler/src/test/results/hbase_stats.q.out
===================================================================
--- hbase-handler/src/test/results/hbase_stats.q.out (revision 0)
+++ hbase-handler/src/test/results/hbase_stats.q.out (revision 0)
@@ -0,0 +1,12 @@
+PREHOOK: query: analyze table srcpart compute statistics
+PREHOOK: type: null
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: query: analyze table srcpart compute statistics
+POSTHOOK: type: null
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
Index: hbase-handler/src/test/queries/hbase_stats.q
===================================================================
--- hbase-handler/src/test/queries/hbase_stats.q (revision 0)
+++ hbase-handler/src/test/queries/hbase_stats.q (revision 0)
@@ -0,0 +1,2 @@
+set hive.stats.dbclass=hbase;
+analyze table srcpart compute statistics;
\ No newline at end of file
Index: hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStatsPublisher.java
===================================================================
--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStatsPublisher.java (revision 0)
+++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStatsPublisher.java (revision 0)
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.hbase;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.RowLock;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hive.ql.stats.*;
+
+/**
+ * A class that implements the StatsPublisher interface through HBase.
+ */
+public class HBaseStatsPublisher implements StatsPublisher {
+
+ private HTable htable;
+ private byte[] rowCountFamily, rowCountColumn;
+ private final Log LOG = LogFactory.getLog(this.getClass().getName());
+
+ /**
+ * Does the necessary HBase initializations.
+ */
+ public boolean connect(Configuration hiveconf) {
+
+ try {
+ HBaseConfiguration hbaseConf = new HBaseConfiguration(hiveconf);
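+ // Instantiating HBaseAdmin here appears to double as a connectivity check:
+ // in the HBase 0.20-era client API its constructor contacts the master and
+ // throws an IOException if the master is not running.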
+ HBaseAdmin hbase = new HBaseAdmin(hbaseConf);
+ rowCountFamily = Bytes.toBytes(HBaseStatsSetupConstants.PART_STAT_ROW_COUNT_COLUMN_FAMILY);
+ rowCountColumn = Bytes.toBytes(HBaseStatsSetupConstants.PART_STAT_ROW_COUNT_COLUMN_NAME);
+ htable = new HTable(HBaseStatsSetupConstants.PART_STAT_TABLE_NAME);
+ } catch (IOException e) {
+ LOG.error("Error during HBase initialization. " + e);
+ return false;
+ }
+
+ return true;
+ }
+
+ /**
+ * Writes temporary statistics into HBase.
+ */
+ public boolean publishStat(String rowID, String key, String value) {
+
+ boolean success = true;
+
+ // Write in HBase
+ RowLock rowLock = null;
+ try {
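+ // Read-modify-write under an explicit row lock: read the current count for
+ // this partition, add this task's value, and write the sum back before the
+ // lock is released in the finally block.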
+ rowLock = htable.lockRow(Bytes.toBytes(rowID));
+
+ Get get = new Get(Bytes.toBytes(rowID), rowLock);
+ Result result = htable.get(get);
+ int val = Integer.parseInt(value);
+ if (!result.isEmpty()) {
+ if (StatsSetupConst.ROW_COUNT.equals(key)) {
+ val += Integer.parseInt(Bytes.toString(result.getValue(rowCountFamily, rowCountColumn)));
+ } else {
+ LOG.warn("Invalid statistic. Currently row count is the only supported statistic.");
+ return false;
+ }
+ }
+ Put row = new Put(Bytes.toBytes(rowID), rowLock);
+ if (StatsSetupConst.ROW_COUNT.equals(key)) {
+ row.add(rowCountFamily, rowCountColumn, Bytes.toBytes(Integer.toString(val)));
+ } else {
+ LOG.warn("Invalid statistic. Currently row count is the only supported statistic.");
+ return false;
+ }
+ htable.put(row);
+
+ } catch (IOException e) {
+ LOG.error("Error while publishing statistics. " + e);
+ success = false;
+ }
+ finally {
+ try {
+ if (rowLock != null) {
+ htable.unlockRow(rowLock);
+ }
+ } catch (IOException e) {
+ LOG.error("Error while releasing the row lock. " + e);
+ success = false;
+ }
+ }
+
+ return success;
+ }
+
+ public boolean closeConnection() {
+ return true;
+ }
+
+
+ /**
+ * Does the necessary HBase initializations.
+ */
+ public boolean init(Configuration hiveconf) {
+ try {
+ HBaseConfiguration hbaseConf = new HBaseConfiguration(hiveconf);
+ HBaseAdmin hbase = new HBaseAdmin(hbaseConf);
+
+ rowCountFamily = Bytes.toBytes(HBaseStatsSetupConstants.PART_STAT_ROW_COUNT_COLUMN_FAMILY);
+ rowCountColumn = Bytes.toBytes(HBaseStatsSetupConstants.PART_STAT_ROW_COUNT_COLUMN_NAME);
+
+ // Creating table if not exists
+ if (!hbase.tableExists(HBaseStatsSetupConstants.PART_STAT_TABLE_NAME)) {
+ HTableDescriptor table = new HTableDescriptor(HBaseStatsSetupConstants.PART_STAT_TABLE_NAME);
+
+ HColumnDescriptor rowCount = new HColumnDescriptor(rowCountFamily);
+ table.addFamily(rowCount);
+
+ hbase.createTable(table);
+ }
+ } catch (IOException e) {
+ LOG.error("Error during HBase initialization. " + e);
+ return false;
+ }
+
+ return true;
+ }
+
+}
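
The intended lifecycle of the publisher above appears to be: init() once from the client to create the stats table if needed, then connect(), one or more publishStat() calls per task, and closeConnection(). A minimal hypothetical driver follows; the class name and row ID are invented for illustration and are not part of this patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hive.hbase.HBaseStatsPublisher;
    import org.apache.hadoop.hive.ql.stats.StatsSetupConst;

    public class HBaseStatsPublisherSketch {
      public static void main(String[] args) {
        Configuration conf = new HBaseConfiguration();
        HBaseStatsPublisher publisher = new HBaseStatsPublisher();
        publisher.init(conf); // creates PARTITION_STAT_TBL on first use
        if (publisher.connect(conf)) {
          // Each task publishes its partial row count under the partition's row
          // ID; publishStat() adds it to whatever count is already stored.
          publisher.publishStat("default.srcpart/ds=2008-04-08/hr=11",
              StatsSetupConst.ROW_COUNT, "500");
          publisher.closeConnection();
        }
      }
    }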
Index: hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStatsSetupConstants.java
===================================================================
--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStatsSetupConstants.java (revision 0)
+++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStatsSetupConstants.java (revision 0)
@@ -0,0 +1,11 @@
+package org.apache.hadoop.hive.hbase;
+
+public final class HBaseStatsSetupConstants {
+
+ public static final String PART_STAT_TABLE_NAME = "PARTITION_STAT_TBL";
+
+ public static final String PART_STAT_ROW_COUNT_COLUMN_NAME = "ROW_COUNT";
+
+ public static final String PART_STAT_ROW_COUNT_COLUMN_FAMILY = "ROW_COUNT_FAMILY";
+
+}
Index: hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStatsAggregator.java
===================================================================
--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStatsAggregator.java (revision 0)
+++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStatsAggregator.java (revision 0)
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.hbase;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hive.ql.stats.*;
+
+
+/**
+ * A class that implements the StatsAggregator interface through HBase.
+ */
+public class HBaseStatsAggregator implements StatsAggregator {
+
+ private HTable htable;
+ private byte[] rowCountFamily, rowCountColumn;
+ private final Log LOG = LogFactory.getLog(this.getClass().getName());
+
+ /**
+ * Does the necessary HBase initializations.
+ */
+ public boolean connect(Configuration hiveconf) {
+
+ try {
+ HBaseConfiguration hbaseConf = new HBaseConfiguration(hiveconf);
+ HBaseAdmin hbase = new HBaseAdmin(hbaseConf);
+ rowCountFamily = Bytes.toBytes(HBaseStatsSetupConstants.PART_STAT_ROW_COUNT_COLUMN_FAMILY);
+ rowCountColumn = Bytes.toBytes(HBaseStatsSetupConstants.PART_STAT_ROW_COUNT_COLUMN_NAME);
+ htable = new HTable(HBaseStatsSetupConstants.PART_STAT_TABLE_NAME);
+
+ } catch (IOException e) {
+ LOG.error("Error during HBase initializations. " + e);
+ return false;
+ }
+
+ return true;
+ }
+
+ /**
+ * Aggregates temporary stats from HBase.
+ */
+ public String aggregateStats(String rowID, String key) {
+
+ byte[] retValue = null;
+
+ try {
+ Get get = new Get(Bytes.toBytes(rowID));
+ Result result = htable.get(get);
+
+ // Row Count
+ if (StatsSetupConst.ROW_COUNT.equals(key)) {
+ retValue = result.getValue(rowCountFamily, rowCountColumn);
+ /* Automatic cleaning:
+ IMPORTANT: Since we publish and aggregate only one value (one column), the row count, it
+ is valid to delete the row after aggregation (automatic cleaning) because we know there are
+ no other values left to aggregate.
+ If, in the future, other values are aggregated and published, then we cannot clean up here
+ except when we are sure that all values have been aggregated; alternatively, cleaning can be
+ moved into a separate method which the developer has to call manually.
+ */
+ Delete delete = new Delete(Bytes.toBytes(rowID));
+ htable.delete(delete);
+ } else {
+ LOG.warn("Invalid statistic. Currently row count is the only supported statistic.");
+ return null;
+ }
+ } catch (IOException e) {
+ LOG.error("Error during publishing aggregation. " + e);
+ return null;
+ }
+
+ return Bytes.toString(retValue);
+ }
+
+ public boolean closeConnection() {
+ return true;
+ }
+
+}
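
On the consuming side, the Hive stats task aggregates what the tasks published. A matching hypothetical sketch (again, the class name and row ID are invented for illustration, not part of this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hive.hbase.HBaseStatsAggregator;
    import org.apache.hadoop.hive.ql.stats.StatsSetupConst;

    public class HBaseStatsAggregatorSketch {
      public static void main(String[] args) {
        Configuration conf = new HBaseConfiguration();
        HBaseStatsAggregator aggregator = new HBaseStatsAggregator();
        if (aggregator.connect(conf)) {
          // Returns the accumulated row count for the partition and deletes the
          // HBase row afterwards (the "automatic cleaning" described above).
          String numRows = aggregator.aggregateStats(
              "default.srcpart/ds=2008-04-08/hr=11", StatsSetupConst.ROW_COUNT);
          System.out.println("numRows = " + numRows);
          aggregator.closeConnection();
        }
      }
    }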
Index: lib/mysql-connector-java-5.1.6-bin.jar
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes on: lib/mysql-connector-java-5.1.6-bin.jar
___________________________________________________________________
Added: svn:mime-type
+ application/octet-stream
Index: build-common.xml
===================================================================
--- build-common.xml (revision 982011)
+++ build-common.xml (working copy)
@@ -412,7 +412,7 @@
-->
-[one XML line removed; markup stripped during extraction, content not recoverable]
+[one XML line added; markup stripped during extraction, content not recoverable]
Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 982011)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy)
@@ -262,6 +262,12 @@
HIVEOPTSORTMERGEBUCKETMAPJOIN("hive.optimize.bucketmapjoin.sortedmerge", false), // try to use sorted merge bucket map join
HIVEOPTREDUCEDEDUPLICATION("hive.optimize.reducededuplication", true),
+ // Statistics
+ HIVESTATSDBCLASS("hive.stats.dbclass", "jdbc:derby"), // supported values: jdbc:derby, jdbc:mysql, hbase; see StatsSetupConst.java for how these values are set up
+ HIVESTATSAUTOGATHER("hive.stats.autogather", true), // whether to gather statistics automatically during the INSERT OVERWRITE command
+ HIVESTATSJDBCDRIVER("hive.stats.jdbcdriver", "org.apache.derby.jdbc.EmbeddedDriver"), // JDBC driver for the temporary stats database
+ HIVESTATSDBCONNECTIONSTRING("hive.stats.dbconnectionstring", "jdbc:derby:;databaseName=TempStatsStore;create=true"), // JDBC connection string for the temporary stats database
+
// For HBase storage handler
HIVE_HBASE_WAL_ENABLED("hive.hbase.wal.enabled", true),
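
A short sketch of reading the new variables back through HiveConf's standard accessors, getVar and getBoolVar (the wrapper class below is hypothetical, not part of this patch):

    import org.apache.hadoop.hive.conf.HiveConf;

    public class StatsConfSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // "jdbc:derby" unless overridden in hive-site.xml or via "set hive.stats.dbclass=..."
        String dbClass = conf.getVar(HiveConf.ConfVars.HIVESTATSDBCLASS);
        boolean autogather = conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER);
        System.out.println("stats backend: " + dbClass + ", autogather: " + autogather);
      }
    }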
Index: ql/src/test/results/clientpositive/stats8.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats8.q.out (revision 0)
+++ ql/src/test/results/clientpositive/stats8.q.out (revision 0)
@@ -0,0 +1,52 @@
+PREHOOK: query: analyze table srcpart PARTITION(ds, hr) compute statistics
+PREHOOK: type: null
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: query: analyze table srcpart PARTITION(ds, hr) compute statistics
+POSTHOOK: type: null
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: query: describe extended srcpart PARTITION(ds='2008-04-08',hr=11)
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended srcpart PARTITION(ds='2008-04-08',hr=11)
+POSTHOOK: type: DESCTABLE
+key string default
+value string default
+ds string
+hr string
+
+Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:srcpart, createTime:1280872528, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1280877203, numRows=500, totalSize=4096})
+PREHOOK: query: describe extended srcpart PARTITION(ds='2008-04-09',hr=12)
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended srcpart PARTITION(ds='2008-04-09',hr=12)
+POSTHOOK: type: DESCTABLE
+key string default
+value string default
+ds string
+hr string
+
+Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:srcpart, createTime:1280872529, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1280877203, numRows=500, totalSize=4096})
+PREHOOK: query: describe extended srcpart PARTITION(ds='2008-04-08',hr=11)
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended srcpart PARTITION(ds='2008-04-08',hr=11)
+POSTHOOK: type: DESCTABLE
+key string default
+value string default
+ds string
+hr string
+
+Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:srcpart, createTime:1280872528, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1280877203, numRows=500, totalSize=4096})
+PREHOOK: query: describe extended srcpart PARTITION(ds='2008-04-09',hr=12)
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended srcpart PARTITION(ds='2008-04-09',hr=12)
+POSTHOOK: type: DESCTABLE
+key string default
+value string default
+ds string
+hr string
+
+Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:srcpart, createTime:1280872529, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1280877203, numRows=500, totalSize=4096})
Index: ql/src/test/results/clientpositive/stats3.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats3.q.out (revision 0)
+++ ql/src/test/results/clientpositive/stats3.q.out (revision 0)
@@ -0,0 +1,132 @@
+PREHOOK: query: drop table hive_test_src
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table hive_test_src
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table hive_test_dst
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table hive_test_dst
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table hive_test_src ( col1 string ) stored as textfile
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table hive_test_src ( col1 string ) stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@hive_test_src
+PREHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table hive_test_src
+PREHOOK: type: LOAD
+POSTHOOK: query: load data local inpath '../data/files/test.dat' overwrite into table hive_test_src
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@hive_test_src
+PREHOOK: query: create table hive_test_dst ( col1 string ) partitioned by ( pcol1 string , pcol2 string) stored as sequencefile
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table hive_test_dst ( col1 string ) partitioned by ( pcol1 string , pcol2 string) stored as sequencefile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@hive_test_dst
+PREHOOK: query: insert overwrite table hive_test_dst partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@hive_test_src
+PREHOOK: Output: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+POSTHOOK: query: insert overwrite table hive_test_dst partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@hive_test_src
+POSTHOOK: Output: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+PREHOOK: query: select * from hive_test_dst where pcol1='test_part' and pcol2='test_Part'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+PREHOOK: Output: file:/tmp/aaly/hive_2010-08-04_12-48-16_367_3909580618833730976/-mr-10000
+POSTHOOK: query: select * from hive_test_dst where pcol1='test_part' and pcol2='test_Part'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+POSTHOOK: Output: file:/tmp/aaly/hive_2010-08-04_12-48-16_367_3909580618833730976/-mr-10000
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+1 test_part test_Part
+2 test_part test_Part
+3 test_part test_Part
+4 test_part test_Part
+5 test_part test_Part
+6 test_part test_Part
+PREHOOK: query: select count(1) from hive_test_dst
+PREHOOK: type: QUERY
+PREHOOK: Input: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+PREHOOK: Output: file:/tmp/aaly/hive_2010-08-04_12-48-16_683_1151783909114489660/-mr-10000
+POSTHOOK: query: select count(1) from hive_test_dst
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+POSTHOOK: Output: file:/tmp/aaly/hive_2010-08-04_12-48-16_683_1151783909114489660/-mr-10000
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+6
+PREHOOK: query: insert overwrite table hive_test_dst partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@hive_test_src
+PREHOOK: Output: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+POSTHOOK: query: insert overwrite table hive_test_dst partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@hive_test_src
+POSTHOOK: Output: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+PREHOOK: query: select * from hive_test_dst where pcol1='test_part' and pcol2='test_part'
+PREHOOK: type: QUERY
+PREHOOK: Output: file:/tmp/aaly/hive_2010-08-04_12-48-23_249_3869046444303707171/-mr-10000
+POSTHOOK: query: select * from hive_test_dst where pcol1='test_part' and pcol2='test_part'
+POSTHOOK: type: QUERY
+POSTHOOK: Output: file:/tmp/aaly/hive_2010-08-04_12-48-23_249_3869046444303707171/-mr-10000
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+PREHOOK: query: select count(1) from hive_test_dst
+PREHOOK: type: QUERY
+PREHOOK: Input: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+PREHOOK: Output: file:/tmp/aaly/hive_2010-08-04_12-48-23_307_7755470075971345346/-mr-10000
+POSTHOOK: query: select count(1) from hive_test_dst
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+POSTHOOK: Output: file:/tmp/aaly/hive_2010-08-04_12-48-23_307_7755470075971345346/-mr-10000
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+6
+PREHOOK: query: select * from hive_test_dst where pcol1='test_part'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+PREHOOK: Output: file:/tmp/aaly/hive_2010-08-04_12-48-25_902_4804840223806837862/-mr-10000
+POSTHOOK: query: select * from hive_test_dst where pcol1='test_part'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+POSTHOOK: Output: file:/tmp/aaly/hive_2010-08-04_12-48-25_902_4804840223806837862/-mr-10000
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+1 test_part test_Part
+2 test_part test_Part
+3 test_part test_Part
+4 test_part test_Part
+5 test_part test_Part
+6 test_part test_Part
+PREHOOK: query: select * from hive_test_dst where pcol1='test_part' and pcol2='test_part'
+PREHOOK: type: QUERY
+PREHOOK: Output: file:/tmp/aaly/hive_2010-08-04_12-48-26_132_1036351978829772706/-mr-10000
+POSTHOOK: query: select * from hive_test_dst where pcol1='test_part' and pcol2='test_part'
+POSTHOOK: type: QUERY
+POSTHOOK: Output: file:/tmp/aaly/hive_2010-08-04_12-48-26_132_1036351978829772706/-mr-10000
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+PREHOOK: query: select * from hive_test_dst where pcol1='test_Part'
+PREHOOK: type: QUERY
+PREHOOK: Output: file:/tmp/aaly/hive_2010-08-04_12-48-26_185_545787673493639708/-mr-10000
+POSTHOOK: query: select * from hive_test_dst where pcol1='test_Part'
+POSTHOOK: type: QUERY
+POSTHOOK: Output: file:/tmp/aaly/hive_2010-08-04_12-48-26_185_545787673493639708/-mr-10000
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+PREHOOK: query: drop table hive_test_src
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table hive_test_src
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@hive_test_src
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+PREHOOK: query: drop table hive_test_dst
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table hive_test_dst
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@hive_test_dst
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
+POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
Index: ql/src/test/results/clientpositive/stats9.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats9.q.out (revision 0)
+++ ql/src/test/results/clientpositive/stats9.q.out (revision 0)
@@ -0,0 +1,28 @@
+PREHOOK: query: analyze table srcbucket compute statistics
+PREHOOK: type: null
+PREHOOK: Input: default@srcbucket
+POSTHOOK: query: analyze table srcbucket compute statistics
+POSTHOOK: type: null
+POSTHOOK: Input: default@srcbucket
+PREHOOK: query: describe extended srcbucket
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended srcbucket
+POSTHOOK: type: DESCTABLE
+key int
+value string
+
+Detailed Table Information Table(tableName:srcbucket, dbName:default, owner:aaly, createTime:1280776830, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/srcbucket, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1280776838, numRows=1000, totalSize=4096}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+PREHOOK: query: analyze table srcbucket2 compute statistics
+PREHOOK: type: null
+PREHOOK: Input: default@srcbucket2
+POSTHOOK: query: analyze table srcbucket2 compute statistics
+POSTHOOK: type: null
+POSTHOOK: Input: default@srcbucket2
+PREHOOK: query: describe extended srcbucket2
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended srcbucket2
+POSTHOOK: type: DESCTABLE
+key int
+value string
+
+Detailed Table Information Table(tableName:srcbucket2, dbName:default, owner:aaly, createTime:1280776831, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/srcbucket2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:4, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1280776842, numRows=500, totalSize=4096}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
Index: ql/src/test/results/clientpositive/stats4.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats4.q.out (revision 0)
+++ ql/src/test/results/clientpositive/stats4.q.out (revision 0)
@@ -0,0 +1,2357 @@
+PREHOOK: query: show partitions srcpart
+PREHOOK: type: SHOWPARTITIONS
+POSTHOOK: query: show partitions srcpart
+POSTHOOK: type: SHOWPARTITIONS
+ds=2008-04-08/hr=11
+ds=2008-04-08/hr=12
+ds=2008-04-09/hr=11
+ds=2008-04-09/hr=12
+PREHOOK: query: drop table nzhang_part1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table nzhang_part1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table nzhang_part2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table nzhang_part2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table if not exists nzhang_part1 like srcpart
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table if not exists nzhang_part1 like srcpart
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@nzhang_part1
+PREHOOK: query: create table if not exists nzhang_part2 like srcpart
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table if not exists nzhang_part2 like srcpart
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@nzhang_part2
+PREHOOK: query: explain
+from srcpart
+insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from srcpart
+insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+ (TOK_QUERY (TOK_FROM (TOK_TABREF srcpart)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB nzhang_part1 (TOK_PARTSPEC (TOK_PARTVAL ds) (TOK_PARTVAL hr)))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_TABLE_OR_COL ds)) (TOK_SELEXPR (TOK_TABLE_OR_COL hr))) (TOK_WHERE (<= (TOK_TABLE_OR_COL ds) '2008-04-08'))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB nzhang_part2 (TOK_PARTSPEC (TOK_PARTVAL ds '2008-12-31') (TOK_PARTVAL hr)))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_TABLE_OR_COL hr))) (TOK_WHERE (> (TOK_TABLE_OR_COL ds) '2008-04-08'))))
+
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-1 depends on stages: Stage-2
+ Stage-4 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Map Reduce
+ Alias -> Map Operator Tree:
+ srcpart
+ TableScan
+ alias: srcpart
+ Filter Operator
+ predicate:
+ expr: (ds <= '2008-04-08')
+ type: boolean
+ Select Operator
+ expressions:
+ expr: key
+ type: string
+ expr: value
+ type: string
+ expr: ds
+ type: string
+ expr: hr
+ type: string
+ outputColumnNames: _col0, _col1, _col2, _col3
+ File Output Operator
+ compressed: false
+ GlobalTableId: 1
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: nzhang_part1
+ Filter Operator
+ predicate:
+ expr: (ds > '2008-04-08')
+ type: boolean
+ Select Operator
+ expressions:
+ expr: key
+ type: string
+ expr: value
+ type: string
+ expr: hr
+ type: string
+ outputColumnNames: _col0, _col1, _col2
+ File Output Operator
+ compressed: false
+ GlobalTableId: 2
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: nzhang_part2
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds
+ hr
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: nzhang_part1
+
+ Stage: Stage-3
+ Stats Operator
+
+ Stage: Stage-1
+ Move Operator
+ tables:
+ partition:
+ ds 2008-12-31
+ hr
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: nzhang_part2
+
+ Stage: Stage-4
+ Stats Operator
+
+
+PREHOOK: query: from srcpart
+insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: query: from srcpart
+insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@nzhang_part1@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@nzhang_part1@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@nzhang_part2@ds=2008-12-31/hr=11
+POSTHOOK: Output: default@nzhang_part2@ds=2008-12-31/hr=12
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+PREHOOK: query: show partitions nzhang_part1
+PREHOOK: type: SHOWPARTITIONS
+POSTHOOK: query: show partitions nzhang_part1
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+ds=2008-04-08/hr=11
+ds=2008-04-08/hr=12
+PREHOOK: query: show partitions nzhang_part2
+PREHOOK: type: SHOWPARTITIONS
+POSTHOOK: query: show partitions nzhang_part2
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+ds=2008-12-31/hr=11
+ds=2008-12-31/hr=12
+PREHOOK: query: select * from nzhang_part1 where ds is not null and hr is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=11
+PREHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=12
+PREHOOK: Output: file:/tmp/aaly/hive_2010-08-09_13-10-49_944_7480353582912969356/-mr-10000
+POSTHOOK: query: select * from nzhang_part1 where ds is not null and hr is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=12
+POSTHOOK: Output: file:/tmp/aaly/hive_2010-08-09_13-10-49_944_7480353582912969356/-mr-10000
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+238 val_238 2008-04-08 11
+86 val_86 2008-04-08 11
+311 val_311 2008-04-08 11
+27 val_27 2008-04-08 11
+165 val_165 2008-04-08 11
+409 val_409 2008-04-08 11
+255 val_255 2008-04-08 11
+278 val_278 2008-04-08 11
+98 val_98 2008-04-08 11
+484 val_484 2008-04-08 11
+265 val_265 2008-04-08 11
+193 val_193 2008-04-08 11
+401 val_401 2008-04-08 11
+150 val_150 2008-04-08 11
+273 val_273 2008-04-08 11
+224 val_224 2008-04-08 11
+369 val_369 2008-04-08 11
+66 val_66 2008-04-08 11
+128 val_128 2008-04-08 11
+213 val_213 2008-04-08 11
+146 val_146 2008-04-08 11
+406 val_406 2008-04-08 11
+429 val_429 2008-04-08 11
+374 val_374 2008-04-08 11
+152 val_152 2008-04-08 11
+469 val_469 2008-04-08 11
+145 val_145 2008-04-08 11
+495 val_495 2008-04-08 11
+37 val_37 2008-04-08 11
+327 val_327 2008-04-08 11
+281 val_281 2008-04-08 11
+277 val_277 2008-04-08 11
+209 val_209 2008-04-08 11
+15 val_15 2008-04-08 11
+82 val_82 2008-04-08 11
+403 val_403 2008-04-08 11
+166 val_166 2008-04-08 11
+417 val_417 2008-04-08 11
+430 val_430 2008-04-08 11
+252 val_252 2008-04-08 11
+292 val_292 2008-04-08 11
+219 val_219 2008-04-08 11
+287 val_287 2008-04-08 11
+153 val_153 2008-04-08 11
+193 val_193 2008-04-08 11
+338 val_338 2008-04-08 11
+446 val_446 2008-04-08 11
+459 val_459 2008-04-08 11
+394 val_394 2008-04-08 11
+237 val_237 2008-04-08 11
+482 val_482 2008-04-08 11
+174 val_174 2008-04-08 11
+413 val_413 2008-04-08 11
+494 val_494 2008-04-08 11
+207 val_207 2008-04-08 11
+199 val_199 2008-04-08 11
+466 val_466 2008-04-08 11
+208 val_208 2008-04-08 11
+174 val_174 2008-04-08 11
+399 val_399 2008-04-08 11
+396 val_396 2008-04-08 11
+247 val_247 2008-04-08 11
+417 val_417 2008-04-08 11
+489 val_489 2008-04-08 11
+162 val_162 2008-04-08 11
+377 val_377 2008-04-08 11
+397 val_397 2008-04-08 11
+309 val_309 2008-04-08 11
+365 val_365 2008-04-08 11
+266 val_266 2008-04-08 11
+439 val_439 2008-04-08 11
+342 val_342 2008-04-08 11
+367 val_367 2008-04-08 11
+325 val_325 2008-04-08 11
+167 val_167 2008-04-08 11
+195 val_195 2008-04-08 11
+475 val_475 2008-04-08 11
+17 val_17 2008-04-08 11
+113 val_113 2008-04-08 11
+155 val_155 2008-04-08 11
+203 val_203 2008-04-08 11
+339 val_339 2008-04-08 11
+0 val_0 2008-04-08 11
+455 val_455 2008-04-08 11
+128 val_128 2008-04-08 11
+311 val_311 2008-04-08 11
+316 val_316 2008-04-08 11
+57 val_57 2008-04-08 11
+302 val_302 2008-04-08 11
+205 val_205 2008-04-08 11
+149 val_149 2008-04-08 11
+438 val_438 2008-04-08 11
+345 val_345 2008-04-08 11
+129 val_129 2008-04-08 11
+170 val_170 2008-04-08 11
+20 val_20 2008-04-08 11
+489 val_489 2008-04-08 11
+157 val_157 2008-04-08 11
+378 val_378 2008-04-08 11
+221 val_221 2008-04-08 11
+92 val_92 2008-04-08 11
+111 val_111 2008-04-08 11
+47 val_47 2008-04-08 11
+72 val_72 2008-04-08 11
+4 val_4 2008-04-08 11
+280 val_280 2008-04-08 11
+35 val_35 2008-04-08 11
+427 val_427 2008-04-08 11
+277 val_277 2008-04-08 11
+208 val_208 2008-04-08 11
+356 val_356 2008-04-08 11
+399 val_399 2008-04-08 11
+169 val_169 2008-04-08 11
+382 val_382 2008-04-08 11
+498 val_498 2008-04-08 11
+125 val_125 2008-04-08 11
+386 val_386 2008-04-08 11
+437 val_437 2008-04-08 11
+469 val_469 2008-04-08 11
+192 val_192 2008-04-08 11
+286 val_286 2008-04-08 11
+187 val_187 2008-04-08 11
+176 val_176 2008-04-08 11
+54 val_54 2008-04-08 11
+459 val_459 2008-04-08 11
+51 val_51 2008-04-08 11
+138 val_138 2008-04-08 11
+103 val_103 2008-04-08 11
+239 val_239 2008-04-08 11
+213 val_213 2008-04-08 11
+216 val_216 2008-04-08 11
+430 val_430 2008-04-08 11
+278 val_278 2008-04-08 11
+176 val_176 2008-04-08 11
+289 val_289 2008-04-08 11
+221 val_221 2008-04-08 11
+65 val_65 2008-04-08 11
+318 val_318 2008-04-08 11
+332 val_332 2008-04-08 11
+311 val_311 2008-04-08 11
+275 val_275 2008-04-08 11
+137 val_137 2008-04-08 11
+241 val_241 2008-04-08 11
+83 val_83 2008-04-08 11
+333 val_333 2008-04-08 11
+180 val_180 2008-04-08 11
+284 val_284 2008-04-08 11
+12 val_12 2008-04-08 11
+230 val_230 2008-04-08 11
+181 val_181 2008-04-08 11
+67 val_67 2008-04-08 11
+260 val_260 2008-04-08 11
+404 val_404 2008-04-08 11
+384 val_384 2008-04-08 11
+489 val_489 2008-04-08 11
+353 val_353 2008-04-08 11
+373 val_373 2008-04-08 11
+272 val_272 2008-04-08 11
+138 val_138 2008-04-08 11
+217 val_217 2008-04-08 11
+84 val_84 2008-04-08 11
+348 val_348 2008-04-08 11
+466 val_466 2008-04-08 11
+58 val_58 2008-04-08 11
+8 val_8 2008-04-08 11
+411 val_411 2008-04-08 11
+230 val_230 2008-04-08 11
+208 val_208 2008-04-08 11
+348 val_348 2008-04-08 11
+24 val_24 2008-04-08 11
+463 val_463 2008-04-08 11
+431 val_431 2008-04-08 11
+179 val_179 2008-04-08 11
+172 val_172 2008-04-08 11
+42 val_42 2008-04-08 11
+129 val_129 2008-04-08 11
+158 val_158 2008-04-08 11
+119 val_119 2008-04-08 11
+496 val_496 2008-04-08 11
+0 val_0 2008-04-08 11
+322 val_322 2008-04-08 11
+197 val_197 2008-04-08 11
+468 val_468 2008-04-08 11
+393 val_393 2008-04-08 11
+454 val_454 2008-04-08 11
+100 val_100 2008-04-08 11
+298 val_298 2008-04-08 11
+199 val_199 2008-04-08 11
+191 val_191 2008-04-08 11
+418 val_418 2008-04-08 11
+96 val_96 2008-04-08 11
+26 val_26 2008-04-08 11
+165 val_165 2008-04-08 11
+327 val_327 2008-04-08 11
+230 val_230 2008-04-08 11
+205 val_205 2008-04-08 11
+120 val_120 2008-04-08 11
+131 val_131 2008-04-08 11
+51 val_51 2008-04-08 11
+404 val_404 2008-04-08 11
+43 val_43 2008-04-08 11
+436 val_436 2008-04-08 11
+156 val_156 2008-04-08 11
+469 val_469 2008-04-08 11
+468 val_468 2008-04-08 11
+308 val_308 2008-04-08 11
+95 val_95 2008-04-08 11
+196 val_196 2008-04-08 11
+288 val_288 2008-04-08 11
+481 val_481 2008-04-08 11
+457 val_457 2008-04-08 11
+98 val_98 2008-04-08 11
+282 val_282 2008-04-08 11
+197 val_197 2008-04-08 11
+187 val_187 2008-04-08 11
+318 val_318 2008-04-08 11
+318 val_318 2008-04-08 11
+409 val_409 2008-04-08 11
+470 val_470 2008-04-08 11
+137 val_137 2008-04-08 11
+369 val_369 2008-04-08 11
+316 val_316 2008-04-08 11
+169 val_169 2008-04-08 11
+413 val_413 2008-04-08 11
+85 val_85 2008-04-08 11
+77 val_77 2008-04-08 11
+0 val_0 2008-04-08 11
+490 val_490 2008-04-08 11
+87 val_87 2008-04-08 11
+364 val_364 2008-04-08 11
+179 val_179 2008-04-08 11
+118 val_118 2008-04-08 11
+134 val_134 2008-04-08 11
+395 val_395 2008-04-08 11
+282 val_282 2008-04-08 11
+138 val_138 2008-04-08 11
+238 val_238 2008-04-08 11
+419 val_419 2008-04-08 11
+15 val_15 2008-04-08 11
+118 val_118 2008-04-08 11
+72 val_72 2008-04-08 11
+90 val_90 2008-04-08 11
+307 val_307 2008-04-08 11
+19 val_19 2008-04-08 11
+435 val_435 2008-04-08 11
+10 val_10 2008-04-08 11
+277 val_277 2008-04-08 11
+273 val_273 2008-04-08 11
+306 val_306 2008-04-08 11
+224 val_224 2008-04-08 11
+309 val_309 2008-04-08 11
+389 val_389 2008-04-08 11
+327 val_327 2008-04-08 11
+242 val_242 2008-04-08 11
+369 val_369 2008-04-08 11
+392 val_392 2008-04-08 11
+272 val_272 2008-04-08 11
+331 val_331 2008-04-08 11
+401 val_401 2008-04-08 11
+242 val_242 2008-04-08 11
+452 val_452 2008-04-08 11
+177 val_177 2008-04-08 11
+226 val_226 2008-04-08 11
+5 val_5 2008-04-08 11
+497 val_497 2008-04-08 11
+402 val_402 2008-04-08 11
+396 val_396 2008-04-08 11
+317 val_317 2008-04-08 11
+395 val_395 2008-04-08 11
+58 val_58 2008-04-08 11
+35 val_35 2008-04-08 11
+336 val_336 2008-04-08 11
+95 val_95 2008-04-08 11
+11 val_11 2008-04-08 11
+168 val_168 2008-04-08 11
+34 val_34 2008-04-08 11
+229 val_229 2008-04-08 11
+233 val_233 2008-04-08 11
+143 val_143 2008-04-08 11
+472 val_472 2008-04-08 11
+322 val_322 2008-04-08 11
+498 val_498 2008-04-08 11
+160 val_160 2008-04-08 11
+195 val_195 2008-04-08 11
+42 val_42 2008-04-08 11
+321 val_321 2008-04-08 11
+430 val_430 2008-04-08 11
+119 val_119 2008-04-08 11
+489 val_489 2008-04-08 11
+458 val_458 2008-04-08 11
+78 val_78 2008-04-08 11
+76 val_76 2008-04-08 11
+41 val_41 2008-04-08 11
+223 val_223 2008-04-08 11
+492 val_492 2008-04-08 11
+149 val_149 2008-04-08 11
+449 val_449 2008-04-08 11
+218 val_218 2008-04-08 11
+228 val_228 2008-04-08 11
+138 val_138 2008-04-08 11
+453 val_453 2008-04-08 11
+30 val_30 2008-04-08 11
+209 val_209 2008-04-08 11
+64 val_64 2008-04-08 11
+468 val_468 2008-04-08 11
+76 val_76 2008-04-08 11
+74 val_74 2008-04-08 11
+342 val_342 2008-04-08 11
+69 val_69 2008-04-08 11
+230 val_230 2008-04-08 11
+33 val_33 2008-04-08 11
+368 val_368 2008-04-08 11
+103 val_103 2008-04-08 11
+296 val_296 2008-04-08 11
+113 val_113 2008-04-08 11
+216 val_216 2008-04-08 11
+367 val_367 2008-04-08 11
+344 val_344 2008-04-08 11
+167 val_167 2008-04-08 11
+274 val_274 2008-04-08 11
+219 val_219 2008-04-08 11
+239 val_239 2008-04-08 11
+485 val_485 2008-04-08 11
+116 val_116 2008-04-08 11
+223 val_223 2008-04-08 11
+256 val_256 2008-04-08 11
+263 val_263 2008-04-08 11
+70 val_70 2008-04-08 11
+487 val_487 2008-04-08 11
+480 val_480 2008-04-08 11
+401 val_401 2008-04-08 11
+288 val_288 2008-04-08 11
+191 val_191 2008-04-08 11
+5 val_5 2008-04-08 11
+244 val_244 2008-04-08 11
+438 val_438 2008-04-08 11
+128 val_128 2008-04-08 11
+467 val_467 2008-04-08 11
+432 val_432 2008-04-08 11
+202 val_202 2008-04-08 11
+316 val_316 2008-04-08 11
+229 val_229 2008-04-08 11
+469 val_469 2008-04-08 11
+463 val_463 2008-04-08 11
+280 val_280 2008-04-08 11
+2 val_2 2008-04-08 11
+35 val_35 2008-04-08 11
+283 val_283 2008-04-08 11
+331 val_331 2008-04-08 11
+235 val_235 2008-04-08 11
+80 val_80 2008-04-08 11
+44 val_44 2008-04-08 11
+193 val_193 2008-04-08 11
+321 val_321 2008-04-08 11
+335 val_335 2008-04-08 11
+104 val_104 2008-04-08 11
+466 val_466 2008-04-08 11
+366 val_366 2008-04-08 11
+175 val_175 2008-04-08 11
+403 val_403 2008-04-08 11
+483 val_483 2008-04-08 11
+53 val_53 2008-04-08 11
+105 val_105 2008-04-08 11
+257 val_257 2008-04-08 11
+406 val_406 2008-04-08 11
+409 val_409 2008-04-08 11
+190 val_190 2008-04-08 11
+406 val_406 2008-04-08 11
+401 val_401 2008-04-08 11
+114 val_114 2008-04-08 11
+258 val_258 2008-04-08 11
+90 val_90 2008-04-08 11
+203 val_203 2008-04-08 11
+262 val_262 2008-04-08 11
+348 val_348 2008-04-08 11
+424 val_424 2008-04-08 11
+12 val_12 2008-04-08 11
+396 val_396 2008-04-08 11
+201 val_201 2008-04-08 11
+217 val_217 2008-04-08 11
+164 val_164 2008-04-08 11
+431 val_431 2008-04-08 11
+454 val_454 2008-04-08 11
+478 val_478 2008-04-08 11
+298 val_298 2008-04-08 11
+125 val_125 2008-04-08 11
+431 val_431 2008-04-08 11
+164 val_164 2008-04-08 11
+424 val_424 2008-04-08 11
+187 val_187 2008-04-08 11
+382 val_382 2008-04-08 11
+5 val_5 2008-04-08 11
+70 val_70 2008-04-08 11
+397 val_397 2008-04-08 11
+480 val_480 2008-04-08 11
+291 val_291 2008-04-08 11
+24 val_24 2008-04-08 11
+351 val_351 2008-04-08 11
+255 val_255 2008-04-08 11
+104 val_104 2008-04-08 11
+70 val_70 2008-04-08 11
+163 val_163 2008-04-08 11
+438 val_438 2008-04-08 11
+119 val_119 2008-04-08 11
+414 val_414 2008-04-08 11
+200 val_200 2008-04-08 11
+491 val_491 2008-04-08 11
+237 val_237 2008-04-08 11
+439 val_439 2008-04-08 11
+360 val_360 2008-04-08 11
+248 val_248 2008-04-08 11
+479 val_479 2008-04-08 11
+305 val_305 2008-04-08 11
+417 val_417 2008-04-08 11
+199 val_199 2008-04-08 11
+444 val_444 2008-04-08 11
+120 val_120 2008-04-08 11
+429 val_429 2008-04-08 11
+169 val_169 2008-04-08 11
+443 val_443 2008-04-08 11
+323 val_323 2008-04-08 11
+325 val_325 2008-04-08 11
+277 val_277 2008-04-08 11
+230 val_230 2008-04-08 11
+478 val_478 2008-04-08 11
+178 val_178 2008-04-08 11
+468 val_468 2008-04-08 11
+310 val_310 2008-04-08 11
+317 val_317 2008-04-08 11
+333 val_333 2008-04-08 11
+493 val_493 2008-04-08 11
+460 val_460 2008-04-08 11
+207 val_207 2008-04-08 11
+249 val_249 2008-04-08 11
+265 val_265 2008-04-08 11
+480 val_480 2008-04-08 11
+83 val_83 2008-04-08 11
+136 val_136 2008-04-08 11
+353 val_353 2008-04-08 11
+172 val_172 2008-04-08 11
+214 val_214 2008-04-08 11
+462 val_462 2008-04-08 11
+233 val_233 2008-04-08 11
+406 val_406 2008-04-08 11
+133 val_133 2008-04-08 11
+175 val_175 2008-04-08 11
+189 val_189 2008-04-08 11
+454 val_454 2008-04-08 11
+375 val_375 2008-04-08 11
+401 val_401 2008-04-08 11
+421 val_421 2008-04-08 11
+407 val_407 2008-04-08 11
+384 val_384 2008-04-08 11
+256 val_256 2008-04-08 11
+26 val_26 2008-04-08 11
+134 val_134 2008-04-08 11
+67 val_67 2008-04-08 11
+384 val_384 2008-04-08 11
+379 val_379 2008-04-08 11
+18 val_18 2008-04-08 11
+462 val_462 2008-04-08 11
+492 val_492 2008-04-08 11
+100 val_100 2008-04-08 11
+298 val_298 2008-04-08 11
+9 val_9 2008-04-08 11
+341 val_341 2008-04-08 11
+498 val_498 2008-04-08 11
+146 val_146 2008-04-08 11
+458 val_458 2008-04-08 11
+362 val_362 2008-04-08 11
+186 val_186 2008-04-08 11
+285 val_285 2008-04-08 11
+348 val_348 2008-04-08 11
+167 val_167 2008-04-08 11
+18 val_18 2008-04-08 11
+273 val_273 2008-04-08 11
+183 val_183 2008-04-08 11
+281 val_281 2008-04-08 11
+344 val_344 2008-04-08 11
+97 val_97 2008-04-08 11
+469 val_469 2008-04-08 11
+315 val_315 2008-04-08 11
+84 val_84 2008-04-08 11
+28 val_28 2008-04-08 11
+37 val_37 2008-04-08 11
+448 val_448 2008-04-08 11
+152 val_152 2008-04-08 11
+348 val_348 2008-04-08 11
+307 val_307 2008-04-08 11
+194 val_194 2008-04-08 11
+414 val_414 2008-04-08 11
+477 val_477 2008-04-08 11
+222 val_222 2008-04-08 11
+126 val_126 2008-04-08 11
+90 val_90 2008-04-08 11
+169 val_169 2008-04-08 11
+403 val_403 2008-04-08 11
+400 val_400 2008-04-08 11
+200 val_200 2008-04-08 11
+97 val_97 2008-04-08 11
+238 val_238 2008-04-08 12
+86 val_86 2008-04-08 12
+311 val_311 2008-04-08 12
+27 val_27 2008-04-08 12
+165 val_165 2008-04-08 12
+409 val_409 2008-04-08 12
+255 val_255 2008-04-08 12
+278 val_278 2008-04-08 12
+98 val_98 2008-04-08 12
+484 val_484 2008-04-08 12
+265 val_265 2008-04-08 12
+193 val_193 2008-04-08 12
+401 val_401 2008-04-08 12
+150 val_150 2008-04-08 12
+273 val_273 2008-04-08 12
+224 val_224 2008-04-08 12
+369 val_369 2008-04-08 12
+66 val_66 2008-04-08 12
+128 val_128 2008-04-08 12
+213 val_213 2008-04-08 12
+146 val_146 2008-04-08 12
+406 val_406 2008-04-08 12
+429 val_429 2008-04-08 12
+374 val_374 2008-04-08 12
+152 val_152 2008-04-08 12
+469 val_469 2008-04-08 12
+145 val_145 2008-04-08 12
+495 val_495 2008-04-08 12
+37 val_37 2008-04-08 12
+327 val_327 2008-04-08 12
+281 val_281 2008-04-08 12
+277 val_277 2008-04-08 12
+209 val_209 2008-04-08 12
+15 val_15 2008-04-08 12
+82 val_82 2008-04-08 12
+403 val_403 2008-04-08 12
+166 val_166 2008-04-08 12
+417 val_417 2008-04-08 12
+430 val_430 2008-04-08 12
+252 val_252 2008-04-08 12
+292 val_292 2008-04-08 12
+219 val_219 2008-04-08 12
+287 val_287 2008-04-08 12
+153 val_153 2008-04-08 12
+193 val_193 2008-04-08 12
+338 val_338 2008-04-08 12
+446 val_446 2008-04-08 12
+459 val_459 2008-04-08 12
+394 val_394 2008-04-08 12
+237 val_237 2008-04-08 12
+482 val_482 2008-04-08 12
+174 val_174 2008-04-08 12
+413 val_413 2008-04-08 12
+494 val_494 2008-04-08 12
+207 val_207 2008-04-08 12
+199 val_199 2008-04-08 12
+466 val_466 2008-04-08 12
+208 val_208 2008-04-08 12
+174 val_174 2008-04-08 12
+399 val_399 2008-04-08 12
+396 val_396 2008-04-08 12
+247 val_247 2008-04-08 12
+417 val_417 2008-04-08 12
+489 val_489 2008-04-08 12
+162 val_162 2008-04-08 12
+377 val_377 2008-04-08 12
+397 val_397 2008-04-08 12
+309 val_309 2008-04-08 12
+365 val_365 2008-04-08 12
+266 val_266 2008-04-08 12
+439 val_439 2008-04-08 12
+342 val_342 2008-04-08 12
+367 val_367 2008-04-08 12
+325 val_325 2008-04-08 12
+167 val_167 2008-04-08 12
+195 val_195 2008-04-08 12
+475 val_475 2008-04-08 12
+17 val_17 2008-04-08 12
+113 val_113 2008-04-08 12
+155 val_155 2008-04-08 12
+203 val_203 2008-04-08 12
+339 val_339 2008-04-08 12
+0 val_0 2008-04-08 12
+455 val_455 2008-04-08 12
+128 val_128 2008-04-08 12
+311 val_311 2008-04-08 12
+316 val_316 2008-04-08 12
+57 val_57 2008-04-08 12
+302 val_302 2008-04-08 12
+205 val_205 2008-04-08 12
+149 val_149 2008-04-08 12
+438 val_438 2008-04-08 12
+345 val_345 2008-04-08 12
+129 val_129 2008-04-08 12
+170 val_170 2008-04-08 12
+20 val_20 2008-04-08 12
+489 val_489 2008-04-08 12
+157 val_157 2008-04-08 12
+378 val_378 2008-04-08 12
+221 val_221 2008-04-08 12
+92 val_92 2008-04-08 12
+111 val_111 2008-04-08 12
+47 val_47 2008-04-08 12
+72 val_72 2008-04-08 12
+4 val_4 2008-04-08 12
+280 val_280 2008-04-08 12
+35 val_35 2008-04-08 12
+427 val_427 2008-04-08 12
+277 val_277 2008-04-08 12
+208 val_208 2008-04-08 12
+356 val_356 2008-04-08 12
+399 val_399 2008-04-08 12
+169 val_169 2008-04-08 12
+382 val_382 2008-04-08 12
+498 val_498 2008-04-08 12
+125 val_125 2008-04-08 12
+386 val_386 2008-04-08 12
+437 val_437 2008-04-08 12
+469 val_469 2008-04-08 12
+192 val_192 2008-04-08 12
+286 val_286 2008-04-08 12
+187 val_187 2008-04-08 12
+176 val_176 2008-04-08 12
+54 val_54 2008-04-08 12
+459 val_459 2008-04-08 12
+51 val_51 2008-04-08 12
+138 val_138 2008-04-08 12
+103 val_103 2008-04-08 12
+239 val_239 2008-04-08 12
+213 val_213 2008-04-08 12
+216 val_216 2008-04-08 12
+430 val_430 2008-04-08 12
+278 val_278 2008-04-08 12
+176 val_176 2008-04-08 12
+289 val_289 2008-04-08 12
+221 val_221 2008-04-08 12
+65 val_65 2008-04-08 12
+318 val_318 2008-04-08 12
+332 val_332 2008-04-08 12
+311 val_311 2008-04-08 12
+275 val_275 2008-04-08 12
+137 val_137 2008-04-08 12
+241 val_241 2008-04-08 12
+83 val_83 2008-04-08 12
+333 val_333 2008-04-08 12
+180 val_180 2008-04-08 12
+284 val_284 2008-04-08 12
+12 val_12 2008-04-08 12
+230 val_230 2008-04-08 12
+181 val_181 2008-04-08 12
+67 val_67 2008-04-08 12
+260 val_260 2008-04-08 12
+404 val_404 2008-04-08 12
+384 val_384 2008-04-08 12
+489 val_489 2008-04-08 12
+353 val_353 2008-04-08 12
+373 val_373 2008-04-08 12
+272 val_272 2008-04-08 12
+138 val_138 2008-04-08 12
+217 val_217 2008-04-08 12
+84 val_84 2008-04-08 12
+348 val_348 2008-04-08 12
+466 val_466 2008-04-08 12
+58 val_58 2008-04-08 12
+8 val_8 2008-04-08 12
+411 val_411 2008-04-08 12
+230 val_230 2008-04-08 12
+208 val_208 2008-04-08 12
+348 val_348 2008-04-08 12
+24 val_24 2008-04-08 12
+463 val_463 2008-04-08 12
+431 val_431 2008-04-08 12
+179 val_179 2008-04-08 12
+172 val_172 2008-04-08 12
+42 val_42 2008-04-08 12
+129 val_129 2008-04-08 12
+158 val_158 2008-04-08 12
+119 val_119 2008-04-08 12
+496 val_496 2008-04-08 12
+0 val_0 2008-04-08 12
+322 val_322 2008-04-08 12
+197 val_197 2008-04-08 12
+468 val_468 2008-04-08 12
+393 val_393 2008-04-08 12
+454 val_454 2008-04-08 12
+100 val_100 2008-04-08 12
+298 val_298 2008-04-08 12
+199 val_199 2008-04-08 12
+191 val_191 2008-04-08 12
+418 val_418 2008-04-08 12
+96 val_96 2008-04-08 12
+26 val_26 2008-04-08 12
+165 val_165 2008-04-08 12
+327 val_327 2008-04-08 12
+230 val_230 2008-04-08 12
+205 val_205 2008-04-08 12
+120 val_120 2008-04-08 12
+131 val_131 2008-04-08 12
+51 val_51 2008-04-08 12
+404 val_404 2008-04-08 12
+43 val_43 2008-04-08 12
+436 val_436 2008-04-08 12
+156 val_156 2008-04-08 12
+469 val_469 2008-04-08 12
+468 val_468 2008-04-08 12
+308 val_308 2008-04-08 12
+95 val_95 2008-04-08 12
+196 val_196 2008-04-08 12
+288 val_288 2008-04-08 12
+481 val_481 2008-04-08 12
+457 val_457 2008-04-08 12
+98 val_98 2008-04-08 12
+282 val_282 2008-04-08 12
+197 val_197 2008-04-08 12
+187 val_187 2008-04-08 12
+318 val_318 2008-04-08 12
+318 val_318 2008-04-08 12
+409 val_409 2008-04-08 12
+470 val_470 2008-04-08 12
+137 val_137 2008-04-08 12
+369 val_369 2008-04-08 12
+316 val_316 2008-04-08 12
+169 val_169 2008-04-08 12
+413 val_413 2008-04-08 12
+85 val_85 2008-04-08 12
+77 val_77 2008-04-08 12
+0 val_0 2008-04-08 12
+490 val_490 2008-04-08 12
+87 val_87 2008-04-08 12
+364 val_364 2008-04-08 12
+179 val_179 2008-04-08 12
+118 val_118 2008-04-08 12
+134 val_134 2008-04-08 12
+395 val_395 2008-04-08 12
+282 val_282 2008-04-08 12
+138 val_138 2008-04-08 12
+238 val_238 2008-04-08 12
+419 val_419 2008-04-08 12
+15 val_15 2008-04-08 12
+118 val_118 2008-04-08 12
+72 val_72 2008-04-08 12
+90 val_90 2008-04-08 12
+307 val_307 2008-04-08 12
+19 val_19 2008-04-08 12
+435 val_435 2008-04-08 12
+10 val_10 2008-04-08 12
+277 val_277 2008-04-08 12
+273 val_273 2008-04-08 12
+306 val_306 2008-04-08 12
+224 val_224 2008-04-08 12
+309 val_309 2008-04-08 12
+389 val_389 2008-04-08 12
+327 val_327 2008-04-08 12
+242 val_242 2008-04-08 12
+369 val_369 2008-04-08 12
+392 val_392 2008-04-08 12
+272 val_272 2008-04-08 12
+331 val_331 2008-04-08 12
+401 val_401 2008-04-08 12
+242 val_242 2008-04-08 12
+452 val_452 2008-04-08 12
+177 val_177 2008-04-08 12
+226 val_226 2008-04-08 12
+5 val_5 2008-04-08 12
+497 val_497 2008-04-08 12
+402 val_402 2008-04-08 12
+396 val_396 2008-04-08 12
+317 val_317 2008-04-08 12
+395 val_395 2008-04-08 12
+58 val_58 2008-04-08 12
+35 val_35 2008-04-08 12
+336 val_336 2008-04-08 12
+95 val_95 2008-04-08 12
+11 val_11 2008-04-08 12
+168 val_168 2008-04-08 12
+34 val_34 2008-04-08 12
+229 val_229 2008-04-08 12
+233 val_233 2008-04-08 12
+143 val_143 2008-04-08 12
+472 val_472 2008-04-08 12
+322 val_322 2008-04-08 12
+498 val_498 2008-04-08 12
+160 val_160 2008-04-08 12
+195 val_195 2008-04-08 12
+42 val_42 2008-04-08 12
+321 val_321 2008-04-08 12
+430 val_430 2008-04-08 12
+119 val_119 2008-04-08 12
+489 val_489 2008-04-08 12
+458 val_458 2008-04-08 12
+78 val_78 2008-04-08 12
+76 val_76 2008-04-08 12
+41 val_41 2008-04-08 12
+223 val_223 2008-04-08 12
+492 val_492 2008-04-08 12
+149 val_149 2008-04-08 12
+449 val_449 2008-04-08 12
+218 val_218 2008-04-08 12
+228 val_228 2008-04-08 12
+138 val_138 2008-04-08 12
+453 val_453 2008-04-08 12
+30 val_30 2008-04-08 12
+209 val_209 2008-04-08 12
+64 val_64 2008-04-08 12
+468 val_468 2008-04-08 12
+76 val_76 2008-04-08 12
+74 val_74 2008-04-08 12
+342 val_342 2008-04-08 12
+69 val_69 2008-04-08 12
+230 val_230 2008-04-08 12
+33 val_33 2008-04-08 12
+368 val_368 2008-04-08 12
+103 val_103 2008-04-08 12
+296 val_296 2008-04-08 12
+113 val_113 2008-04-08 12
+216 val_216 2008-04-08 12
+367 val_367 2008-04-08 12
+344 val_344 2008-04-08 12
+167 val_167 2008-04-08 12
+274 val_274 2008-04-08 12
+219 val_219 2008-04-08 12
+239 val_239 2008-04-08 12
+485 val_485 2008-04-08 12
+116 val_116 2008-04-08 12
+223 val_223 2008-04-08 12
+256 val_256 2008-04-08 12
+263 val_263 2008-04-08 12
+70 val_70 2008-04-08 12
+487 val_487 2008-04-08 12
+480 val_480 2008-04-08 12
+401 val_401 2008-04-08 12
+288 val_288 2008-04-08 12
+191 val_191 2008-04-08 12
+5 val_5 2008-04-08 12
+244 val_244 2008-04-08 12
+438 val_438 2008-04-08 12
+128 val_128 2008-04-08 12
+467 val_467 2008-04-08 12
+432 val_432 2008-04-08 12
+202 val_202 2008-04-08 12
+316 val_316 2008-04-08 12
+229 val_229 2008-04-08 12
+469 val_469 2008-04-08 12
+463 val_463 2008-04-08 12
+280 val_280 2008-04-08 12
+2 val_2 2008-04-08 12
+35 val_35 2008-04-08 12
+283 val_283 2008-04-08 12
+331 val_331 2008-04-08 12
+235 val_235 2008-04-08 12
+80 val_80 2008-04-08 12
+44 val_44 2008-04-08 12
+193 val_193 2008-04-08 12
+321 val_321 2008-04-08 12
+335 val_335 2008-04-08 12
+104 val_104 2008-04-08 12
+466 val_466 2008-04-08 12
+366 val_366 2008-04-08 12
+175 val_175 2008-04-08 12
+403 val_403 2008-04-08 12
+483 val_483 2008-04-08 12
+53 val_53 2008-04-08 12
+105 val_105 2008-04-08 12
+257 val_257 2008-04-08 12
+406 val_406 2008-04-08 12
+409 val_409 2008-04-08 12
+190 val_190 2008-04-08 12
+406 val_406 2008-04-08 12
+401 val_401 2008-04-08 12
+114 val_114 2008-04-08 12
+258 val_258 2008-04-08 12
+90 val_90 2008-04-08 12
+203 val_203 2008-04-08 12
+262 val_262 2008-04-08 12
+348 val_348 2008-04-08 12
+424 val_424 2008-04-08 12
+12 val_12 2008-04-08 12
+396 val_396 2008-04-08 12
+201 val_201 2008-04-08 12
+217 val_217 2008-04-08 12
+164 val_164 2008-04-08 12
+431 val_431 2008-04-08 12
+454 val_454 2008-04-08 12
+478 val_478 2008-04-08 12
+298 val_298 2008-04-08 12
+125 val_125 2008-04-08 12
+431 val_431 2008-04-08 12
+164 val_164 2008-04-08 12
+424 val_424 2008-04-08 12
+187 val_187 2008-04-08 12
+382 val_382 2008-04-08 12
+5 val_5 2008-04-08 12
+70 val_70 2008-04-08 12
+397 val_397 2008-04-08 12
+480 val_480 2008-04-08 12
+291 val_291 2008-04-08 12
+24 val_24 2008-04-08 12
+351 val_351 2008-04-08 12
+255 val_255 2008-04-08 12
+104 val_104 2008-04-08 12
+70 val_70 2008-04-08 12
+163 val_163 2008-04-08 12
+438 val_438 2008-04-08 12
+119 val_119 2008-04-08 12
+414 val_414 2008-04-08 12
+200 val_200 2008-04-08 12
+491 val_491 2008-04-08 12
+237 val_237 2008-04-08 12
+439 val_439 2008-04-08 12
+360 val_360 2008-04-08 12
+248 val_248 2008-04-08 12
+479 val_479 2008-04-08 12
+305 val_305 2008-04-08 12
+417 val_417 2008-04-08 12
+199 val_199 2008-04-08 12
+444 val_444 2008-04-08 12
+120 val_120 2008-04-08 12
+429 val_429 2008-04-08 12
+169 val_169 2008-04-08 12
+443 val_443 2008-04-08 12
+323 val_323 2008-04-08 12
+325 val_325 2008-04-08 12
+277 val_277 2008-04-08 12
+230 val_230 2008-04-08 12
+478 val_478 2008-04-08 12
+178 val_178 2008-04-08 12
+468 val_468 2008-04-08 12
+310 val_310 2008-04-08 12
+317 val_317 2008-04-08 12
+333 val_333 2008-04-08 12
+493 val_493 2008-04-08 12
+460 val_460 2008-04-08 12
+207 val_207 2008-04-08 12
+249 val_249 2008-04-08 12
+265 val_265 2008-04-08 12
+480 val_480 2008-04-08 12
+83 val_83 2008-04-08 12
+136 val_136 2008-04-08 12
+353 val_353 2008-04-08 12
+172 val_172 2008-04-08 12
+214 val_214 2008-04-08 12
+462 val_462 2008-04-08 12
+233 val_233 2008-04-08 12
+406 val_406 2008-04-08 12
+133 val_133 2008-04-08 12
+175 val_175 2008-04-08 12
+189 val_189 2008-04-08 12
+454 val_454 2008-04-08 12
+375 val_375 2008-04-08 12
+401 val_401 2008-04-08 12
+421 val_421 2008-04-08 12
+407 val_407 2008-04-08 12
+384 val_384 2008-04-08 12
+256 val_256 2008-04-08 12
+26 val_26 2008-04-08 12
+134 val_134 2008-04-08 12
+67 val_67 2008-04-08 12
+384 val_384 2008-04-08 12
+379 val_379 2008-04-08 12
+18 val_18 2008-04-08 12
+462 val_462 2008-04-08 12
+492 val_492 2008-04-08 12
+100 val_100 2008-04-08 12
+298 val_298 2008-04-08 12
+9 val_9 2008-04-08 12
+341 val_341 2008-04-08 12
+498 val_498 2008-04-08 12
+146 val_146 2008-04-08 12
+458 val_458 2008-04-08 12
+362 val_362 2008-04-08 12
+186 val_186 2008-04-08 12
+285 val_285 2008-04-08 12
+348 val_348 2008-04-08 12
+167 val_167 2008-04-08 12
+18 val_18 2008-04-08 12
+273 val_273 2008-04-08 12
+183 val_183 2008-04-08 12
+281 val_281 2008-04-08 12
+344 val_344 2008-04-08 12
+97 val_97 2008-04-08 12
+469 val_469 2008-04-08 12
+315 val_315 2008-04-08 12
+84 val_84 2008-04-08 12
+28 val_28 2008-04-08 12
+37 val_37 2008-04-08 12
+448 val_448 2008-04-08 12
+152 val_152 2008-04-08 12
+348 val_348 2008-04-08 12
+307 val_307 2008-04-08 12
+194 val_194 2008-04-08 12
+414 val_414 2008-04-08 12
+477 val_477 2008-04-08 12
+222 val_222 2008-04-08 12
+126 val_126 2008-04-08 12
+90 val_90 2008-04-08 12
+169 val_169 2008-04-08 12
+403 val_403 2008-04-08 12
+400 val_400 2008-04-08 12
+200 val_200 2008-04-08 12
+97 val_97 2008-04-08 12
+PREHOOK: query: select * from nzhang_part2 where ds is not null and hr is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=11
+PREHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=12
+PREHOOK: Output: file:/tmp/aaly/hive_2010-08-09_13-10-50_360_1986744011429301882/-mr-10000
+POSTHOOK: query: select * from nzhang_part2 where ds is not null and hr is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=11
+POSTHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=12
+POSTHOOK: Output: file:/tmp/aaly/hive_2010-08-09_13-10-50_360_1986744011429301882/-mr-10000
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+238 val_238 2008-12-31 11
+86 val_86 2008-12-31 11
+311 val_311 2008-12-31 11
+27 val_27 2008-12-31 11
+165 val_165 2008-12-31 11
+409 val_409 2008-12-31 11
+255 val_255 2008-12-31 11
+278 val_278 2008-12-31 11
+98 val_98 2008-12-31 11
+484 val_484 2008-12-31 11
+265 val_265 2008-12-31 11
+193 val_193 2008-12-31 11
+401 val_401 2008-12-31 11
+150 val_150 2008-12-31 11
+273 val_273 2008-12-31 11
+224 val_224 2008-12-31 11
+369 val_369 2008-12-31 11
+66 val_66 2008-12-31 11
+128 val_128 2008-12-31 11
+213 val_213 2008-12-31 11
+146 val_146 2008-12-31 11
+406 val_406 2008-12-31 11
+429 val_429 2008-12-31 11
+374 val_374 2008-12-31 11
+152 val_152 2008-12-31 11
+469 val_469 2008-12-31 11
+145 val_145 2008-12-31 11
+495 val_495 2008-12-31 11
+37 val_37 2008-12-31 11
+327 val_327 2008-12-31 11
+281 val_281 2008-12-31 11
+277 val_277 2008-12-31 11
+209 val_209 2008-12-31 11
+15 val_15 2008-12-31 11
+82 val_82 2008-12-31 11
+403 val_403 2008-12-31 11
+166 val_166 2008-12-31 11
+417 val_417 2008-12-31 11
+430 val_430 2008-12-31 11
+252 val_252 2008-12-31 11
+292 val_292 2008-12-31 11
+219 val_219 2008-12-31 11
+287 val_287 2008-12-31 11
+153 val_153 2008-12-31 11
+193 val_193 2008-12-31 11
+338 val_338 2008-12-31 11
+446 val_446 2008-12-31 11
+459 val_459 2008-12-31 11
+394 val_394 2008-12-31 11
+237 val_237 2008-12-31 11
+482 val_482 2008-12-31 11
+174 val_174 2008-12-31 11
+413 val_413 2008-12-31 11
+494 val_494 2008-12-31 11
+207 val_207 2008-12-31 11
+199 val_199 2008-12-31 11
+466 val_466 2008-12-31 11
+208 val_208 2008-12-31 11
+174 val_174 2008-12-31 11
+399 val_399 2008-12-31 11
+396 val_396 2008-12-31 11
+247 val_247 2008-12-31 11
+417 val_417 2008-12-31 11
+489 val_489 2008-12-31 11
+162 val_162 2008-12-31 11
+377 val_377 2008-12-31 11
+397 val_397 2008-12-31 11
+309 val_309 2008-12-31 11
+365 val_365 2008-12-31 11
+266 val_266 2008-12-31 11
+439 val_439 2008-12-31 11
+342 val_342 2008-12-31 11
+367 val_367 2008-12-31 11
+325 val_325 2008-12-31 11
+167 val_167 2008-12-31 11
+195 val_195 2008-12-31 11
+475 val_475 2008-12-31 11
+17 val_17 2008-12-31 11
+113 val_113 2008-12-31 11
+155 val_155 2008-12-31 11
+203 val_203 2008-12-31 11
+339 val_339 2008-12-31 11
+0 val_0 2008-12-31 11
+455 val_455 2008-12-31 11
+128 val_128 2008-12-31 11
+311 val_311 2008-12-31 11
+316 val_316 2008-12-31 11
+57 val_57 2008-12-31 11
+302 val_302 2008-12-31 11
+205 val_205 2008-12-31 11
+149 val_149 2008-12-31 11
+438 val_438 2008-12-31 11
+345 val_345 2008-12-31 11
+129 val_129 2008-12-31 11
+170 val_170 2008-12-31 11
+20 val_20 2008-12-31 11
+489 val_489 2008-12-31 11
+157 val_157 2008-12-31 11
+378 val_378 2008-12-31 11
+221 val_221 2008-12-31 11
+92 val_92 2008-12-31 11
+111 val_111 2008-12-31 11
+47 val_47 2008-12-31 11
+72 val_72 2008-12-31 11
+4 val_4 2008-12-31 11
+280 val_280 2008-12-31 11
+35 val_35 2008-12-31 11
+427 val_427 2008-12-31 11
+277 val_277 2008-12-31 11
+208 val_208 2008-12-31 11
+356 val_356 2008-12-31 11
+399 val_399 2008-12-31 11
+169 val_169 2008-12-31 11
+382 val_382 2008-12-31 11
+498 val_498 2008-12-31 11
+125 val_125 2008-12-31 11
+386 val_386 2008-12-31 11
+437 val_437 2008-12-31 11
+469 val_469 2008-12-31 11
+192 val_192 2008-12-31 11
+286 val_286 2008-12-31 11
+187 val_187 2008-12-31 11
+176 val_176 2008-12-31 11
+54 val_54 2008-12-31 11
+459 val_459 2008-12-31 11
+51 val_51 2008-12-31 11
+138 val_138 2008-12-31 11
+103 val_103 2008-12-31 11
+239 val_239 2008-12-31 11
+213 val_213 2008-12-31 11
+216 val_216 2008-12-31 11
+430 val_430 2008-12-31 11
+278 val_278 2008-12-31 11
+176 val_176 2008-12-31 11
+289 val_289 2008-12-31 11
+221 val_221 2008-12-31 11
+65 val_65 2008-12-31 11
+318 val_318 2008-12-31 11
+332 val_332 2008-12-31 11
+311 val_311 2008-12-31 11
+275 val_275 2008-12-31 11
+137 val_137 2008-12-31 11
+241 val_241 2008-12-31 11
+83 val_83 2008-12-31 11
+333 val_333 2008-12-31 11
+180 val_180 2008-12-31 11
+284 val_284 2008-12-31 11
+12 val_12 2008-12-31 11
+230 val_230 2008-12-31 11
+181 val_181 2008-12-31 11
+67 val_67 2008-12-31 11
+260 val_260 2008-12-31 11
+404 val_404 2008-12-31 11
+384 val_384 2008-12-31 11
+489 val_489 2008-12-31 11
+353 val_353 2008-12-31 11
+373 val_373 2008-12-31 11
+272 val_272 2008-12-31 11
+138 val_138 2008-12-31 11
+217 val_217 2008-12-31 11
+84 val_84 2008-12-31 11
+348 val_348 2008-12-31 11
+466 val_466 2008-12-31 11
+58 val_58 2008-12-31 11
+8 val_8 2008-12-31 11
+411 val_411 2008-12-31 11
+230 val_230 2008-12-31 11
+208 val_208 2008-12-31 11
+348 val_348 2008-12-31 11
+24 val_24 2008-12-31 11
+463 val_463 2008-12-31 11
+431 val_431 2008-12-31 11
+179 val_179 2008-12-31 11
+172 val_172 2008-12-31 11
+42 val_42 2008-12-31 11
+129 val_129 2008-12-31 11
+158 val_158 2008-12-31 11
+119 val_119 2008-12-31 11
+496 val_496 2008-12-31 11
+0 val_0 2008-12-31 11
+322 val_322 2008-12-31 11
+197 val_197 2008-12-31 11
+468 val_468 2008-12-31 11
+393 val_393 2008-12-31 11
+454 val_454 2008-12-31 11
+100 val_100 2008-12-31 11
+298 val_298 2008-12-31 11
+199 val_199 2008-12-31 11
+191 val_191 2008-12-31 11
+418 val_418 2008-12-31 11
+96 val_96 2008-12-31 11
+26 val_26 2008-12-31 11
+165 val_165 2008-12-31 11
+327 val_327 2008-12-31 11
+230 val_230 2008-12-31 11
+205 val_205 2008-12-31 11
+120 val_120 2008-12-31 11
+131 val_131 2008-12-31 11
+51 val_51 2008-12-31 11
+404 val_404 2008-12-31 11
+43 val_43 2008-12-31 11
+436 val_436 2008-12-31 11
+156 val_156 2008-12-31 11
+469 val_469 2008-12-31 11
+468 val_468 2008-12-31 11
+308 val_308 2008-12-31 11
+95 val_95 2008-12-31 11
+196 val_196 2008-12-31 11
+288 val_288 2008-12-31 11
+481 val_481 2008-12-31 11
+457 val_457 2008-12-31 11
+98 val_98 2008-12-31 11
+282 val_282 2008-12-31 11
+197 val_197 2008-12-31 11
+187 val_187 2008-12-31 11
+318 val_318 2008-12-31 11
+318 val_318 2008-12-31 11
+409 val_409 2008-12-31 11
+470 val_470 2008-12-31 11
+137 val_137 2008-12-31 11
+369 val_369 2008-12-31 11
+316 val_316 2008-12-31 11
+169 val_169 2008-12-31 11
+413 val_413 2008-12-31 11
+85 val_85 2008-12-31 11
+77 val_77 2008-12-31 11
+0 val_0 2008-12-31 11
+490 val_490 2008-12-31 11
+87 val_87 2008-12-31 11
+364 val_364 2008-12-31 11
+179 val_179 2008-12-31 11
+118 val_118 2008-12-31 11
+134 val_134 2008-12-31 11
+395 val_395 2008-12-31 11
+282 val_282 2008-12-31 11
+138 val_138 2008-12-31 11
+238 val_238 2008-12-31 11
+419 val_419 2008-12-31 11
+15 val_15 2008-12-31 11
+118 val_118 2008-12-31 11
+72 val_72 2008-12-31 11
+90 val_90 2008-12-31 11
+307 val_307 2008-12-31 11
+19 val_19 2008-12-31 11
+435 val_435 2008-12-31 11
+10 val_10 2008-12-31 11
+277 val_277 2008-12-31 11
+273 val_273 2008-12-31 11
+306 val_306 2008-12-31 11
+224 val_224 2008-12-31 11
+309 val_309 2008-12-31 11
+389 val_389 2008-12-31 11
+327 val_327 2008-12-31 11
+242 val_242 2008-12-31 11
+369 val_369 2008-12-31 11
+392 val_392 2008-12-31 11
+272 val_272 2008-12-31 11
+331 val_331 2008-12-31 11
+401 val_401 2008-12-31 11
+242 val_242 2008-12-31 11
+452 val_452 2008-12-31 11
+177 val_177 2008-12-31 11
+226 val_226 2008-12-31 11
+5 val_5 2008-12-31 11
+497 val_497 2008-12-31 11
+402 val_402 2008-12-31 11
+396 val_396 2008-12-31 11
+317 val_317 2008-12-31 11
+395 val_395 2008-12-31 11
+58 val_58 2008-12-31 11
+35 val_35 2008-12-31 11
+336 val_336 2008-12-31 11
+95 val_95 2008-12-31 11
+11 val_11 2008-12-31 11
+168 val_168 2008-12-31 11
+34 val_34 2008-12-31 11
+229 val_229 2008-12-31 11
+233 val_233 2008-12-31 11
+143 val_143 2008-12-31 11
+472 val_472 2008-12-31 11
+322 val_322 2008-12-31 11
+498 val_498 2008-12-31 11
+160 val_160 2008-12-31 11
+195 val_195 2008-12-31 11
+42 val_42 2008-12-31 11
+321 val_321 2008-12-31 11
+430 val_430 2008-12-31 11
+119 val_119 2008-12-31 11
+489 val_489 2008-12-31 11
+458 val_458 2008-12-31 11
+78 val_78 2008-12-31 11
+76 val_76 2008-12-31 11
+41 val_41 2008-12-31 11
+223 val_223 2008-12-31 11
+492 val_492 2008-12-31 11
+149 val_149 2008-12-31 11
+449 val_449 2008-12-31 11
+218 val_218 2008-12-31 11
+228 val_228 2008-12-31 11
+138 val_138 2008-12-31 11
+453 val_453 2008-12-31 11
+30 val_30 2008-12-31 11
+209 val_209 2008-12-31 11
+64 val_64 2008-12-31 11
+468 val_468 2008-12-31 11
+76 val_76 2008-12-31 11
+74 val_74 2008-12-31 11
+342 val_342 2008-12-31 11
+69 val_69 2008-12-31 11
+230 val_230 2008-12-31 11
+33 val_33 2008-12-31 11
+368 val_368 2008-12-31 11
+103 val_103 2008-12-31 11
+296 val_296 2008-12-31 11
+113 val_113 2008-12-31 11
+216 val_216 2008-12-31 11
+367 val_367 2008-12-31 11
+344 val_344 2008-12-31 11
+167 val_167 2008-12-31 11
+274 val_274 2008-12-31 11
+219 val_219 2008-12-31 11
+239 val_239 2008-12-31 11
+485 val_485 2008-12-31 11
+116 val_116 2008-12-31 11
+223 val_223 2008-12-31 11
+256 val_256 2008-12-31 11
+263 val_263 2008-12-31 11
+70 val_70 2008-12-31 11
+487 val_487 2008-12-31 11
+480 val_480 2008-12-31 11
+401 val_401 2008-12-31 11
+288 val_288 2008-12-31 11
+191 val_191 2008-12-31 11
+5 val_5 2008-12-31 11
+244 val_244 2008-12-31 11
+438 val_438 2008-12-31 11
+128 val_128 2008-12-31 11
+467 val_467 2008-12-31 11
+432 val_432 2008-12-31 11
+202 val_202 2008-12-31 11
+316 val_316 2008-12-31 11
+229 val_229 2008-12-31 11
+469 val_469 2008-12-31 11
+463 val_463 2008-12-31 11
+280 val_280 2008-12-31 11
+2 val_2 2008-12-31 11
+35 val_35 2008-12-31 11
+283 val_283 2008-12-31 11
+331 val_331 2008-12-31 11
+235 val_235 2008-12-31 11
+80 val_80 2008-12-31 11
+44 val_44 2008-12-31 11
+193 val_193 2008-12-31 11
+321 val_321 2008-12-31 11
+335 val_335 2008-12-31 11
+104 val_104 2008-12-31 11
+466 val_466 2008-12-31 11
+366 val_366 2008-12-31 11
+175 val_175 2008-12-31 11
+403 val_403 2008-12-31 11
+483 val_483 2008-12-31 11
+53 val_53 2008-12-31 11
+105 val_105 2008-12-31 11
+257 val_257 2008-12-31 11
+406 val_406 2008-12-31 11
+409 val_409 2008-12-31 11
+190 val_190 2008-12-31 11
+406 val_406 2008-12-31 11
+401 val_401 2008-12-31 11
+114 val_114 2008-12-31 11
+258 val_258 2008-12-31 11
+90 val_90 2008-12-31 11
+203 val_203 2008-12-31 11
+262 val_262 2008-12-31 11
+348 val_348 2008-12-31 11
+424 val_424 2008-12-31 11
+12 val_12 2008-12-31 11
+396 val_396 2008-12-31 11
+201 val_201 2008-12-31 11
+217 val_217 2008-12-31 11
+164 val_164 2008-12-31 11
+431 val_431 2008-12-31 11
+454 val_454 2008-12-31 11
+478 val_478 2008-12-31 11
+298 val_298 2008-12-31 11
+125 val_125 2008-12-31 11
+431 val_431 2008-12-31 11
+164 val_164 2008-12-31 11
+424 val_424 2008-12-31 11
+187 val_187 2008-12-31 11
+382 val_382 2008-12-31 11
+5 val_5 2008-12-31 11
+70 val_70 2008-12-31 11
+397 val_397 2008-12-31 11
+480 val_480 2008-12-31 11
+291 val_291 2008-12-31 11
+24 val_24 2008-12-31 11
+351 val_351 2008-12-31 11
+255 val_255 2008-12-31 11
+104 val_104 2008-12-31 11
+70 val_70 2008-12-31 11
+163 val_163 2008-12-31 11
+438 val_438 2008-12-31 11
+119 val_119 2008-12-31 11
+414 val_414 2008-12-31 11
+200 val_200 2008-12-31 11
+491 val_491 2008-12-31 11
+237 val_237 2008-12-31 11
+439 val_439 2008-12-31 11
+360 val_360 2008-12-31 11
+248 val_248 2008-12-31 11
+479 val_479 2008-12-31 11
+305 val_305 2008-12-31 11
+417 val_417 2008-12-31 11
+199 val_199 2008-12-31 11
+444 val_444 2008-12-31 11
+120 val_120 2008-12-31 11
+429 val_429 2008-12-31 11
+169 val_169 2008-12-31 11
+443 val_443 2008-12-31 11
+323 val_323 2008-12-31 11
+325 val_325 2008-12-31 11
+277 val_277 2008-12-31 11
+230 val_230 2008-12-31 11
+478 val_478 2008-12-31 11
+178 val_178 2008-12-31 11
+468 val_468 2008-12-31 11
+310 val_310 2008-12-31 11
+317 val_317 2008-12-31 11
+333 val_333 2008-12-31 11
+493 val_493 2008-12-31 11
+460 val_460 2008-12-31 11
+207 val_207 2008-12-31 11
+249 val_249 2008-12-31 11
+265 val_265 2008-12-31 11
+480 val_480 2008-12-31 11
+83 val_83 2008-12-31 11
+136 val_136 2008-12-31 11
+353 val_353 2008-12-31 11
+172 val_172 2008-12-31 11
+214 val_214 2008-12-31 11
+462 val_462 2008-12-31 11
+233 val_233 2008-12-31 11
+406 val_406 2008-12-31 11
+133 val_133 2008-12-31 11
+175 val_175 2008-12-31 11
+189 val_189 2008-12-31 11
+454 val_454 2008-12-31 11
+375 val_375 2008-12-31 11
+401 val_401 2008-12-31 11
+421 val_421 2008-12-31 11
+407 val_407 2008-12-31 11
+384 val_384 2008-12-31 11
+256 val_256 2008-12-31 11
+26 val_26 2008-12-31 11
+134 val_134 2008-12-31 11
+67 val_67 2008-12-31 11
+384 val_384 2008-12-31 11
+379 val_379 2008-12-31 11
+18 val_18 2008-12-31 11
+462 val_462 2008-12-31 11
+492 val_492 2008-12-31 11
+100 val_100 2008-12-31 11
+298 val_298 2008-12-31 11
+9 val_9 2008-12-31 11
+341 val_341 2008-12-31 11
+498 val_498 2008-12-31 11
+146 val_146 2008-12-31 11
+458 val_458 2008-12-31 11
+362 val_362 2008-12-31 11
+186 val_186 2008-12-31 11
+285 val_285 2008-12-31 11
+348 val_348 2008-12-31 11
+167 val_167 2008-12-31 11
+18 val_18 2008-12-31 11
+273 val_273 2008-12-31 11
+183 val_183 2008-12-31 11
+281 val_281 2008-12-31 11
+344 val_344 2008-12-31 11
+97 val_97 2008-12-31 11
+469 val_469 2008-12-31 11
+315 val_315 2008-12-31 11
+84 val_84 2008-12-31 11
+28 val_28 2008-12-31 11
+37 val_37 2008-12-31 11
+448 val_448 2008-12-31 11
+152 val_152 2008-12-31 11
+348 val_348 2008-12-31 11
+307 val_307 2008-12-31 11
+194 val_194 2008-12-31 11
+414 val_414 2008-12-31 11
+477 val_477 2008-12-31 11
+222 val_222 2008-12-31 11
+126 val_126 2008-12-31 11
+90 val_90 2008-12-31 11
+169 val_169 2008-12-31 11
+403 val_403 2008-12-31 11
+400 val_400 2008-12-31 11
+200 val_200 2008-12-31 11
+97 val_97 2008-12-31 11
+238 val_238 2008-12-31 12
+86 val_86 2008-12-31 12
+311 val_311 2008-12-31 12
+27 val_27 2008-12-31 12
+165 val_165 2008-12-31 12
+409 val_409 2008-12-31 12
+255 val_255 2008-12-31 12
+278 val_278 2008-12-31 12
+98 val_98 2008-12-31 12
+484 val_484 2008-12-31 12
+265 val_265 2008-12-31 12
+193 val_193 2008-12-31 12
+401 val_401 2008-12-31 12
+150 val_150 2008-12-31 12
+273 val_273 2008-12-31 12
+224 val_224 2008-12-31 12
+369 val_369 2008-12-31 12
+66 val_66 2008-12-31 12
+128 val_128 2008-12-31 12
+213 val_213 2008-12-31 12
+146 val_146 2008-12-31 12
+406 val_406 2008-12-31 12
+429 val_429 2008-12-31 12
+374 val_374 2008-12-31 12
+152 val_152 2008-12-31 12
+469 val_469 2008-12-31 12
+145 val_145 2008-12-31 12
+495 val_495 2008-12-31 12
+37 val_37 2008-12-31 12
+327 val_327 2008-12-31 12
+281 val_281 2008-12-31 12
+277 val_277 2008-12-31 12
+209 val_209 2008-12-31 12
+15 val_15 2008-12-31 12
+82 val_82 2008-12-31 12
+403 val_403 2008-12-31 12
+166 val_166 2008-12-31 12
+417 val_417 2008-12-31 12
+430 val_430 2008-12-31 12
+252 val_252 2008-12-31 12
+292 val_292 2008-12-31 12
+219 val_219 2008-12-31 12
+287 val_287 2008-12-31 12
+153 val_153 2008-12-31 12
+193 val_193 2008-12-31 12
+338 val_338 2008-12-31 12
+446 val_446 2008-12-31 12
+459 val_459 2008-12-31 12
+394 val_394 2008-12-31 12
+237 val_237 2008-12-31 12
+482 val_482 2008-12-31 12
+174 val_174 2008-12-31 12
+413 val_413 2008-12-31 12
+494 val_494 2008-12-31 12
+207 val_207 2008-12-31 12
+199 val_199 2008-12-31 12
+466 val_466 2008-12-31 12
+208 val_208 2008-12-31 12
+174 val_174 2008-12-31 12
+399 val_399 2008-12-31 12
+396 val_396 2008-12-31 12
+247 val_247 2008-12-31 12
+417 val_417 2008-12-31 12
+489 val_489 2008-12-31 12
+162 val_162 2008-12-31 12
+377 val_377 2008-12-31 12
+397 val_397 2008-12-31 12
+309 val_309 2008-12-31 12
+365 val_365 2008-12-31 12
+266 val_266 2008-12-31 12
+439 val_439 2008-12-31 12
+342 val_342 2008-12-31 12
+367 val_367 2008-12-31 12
+325 val_325 2008-12-31 12
+167 val_167 2008-12-31 12
+195 val_195 2008-12-31 12
+475 val_475 2008-12-31 12
+17 val_17 2008-12-31 12
+113 val_113 2008-12-31 12
+155 val_155 2008-12-31 12
+203 val_203 2008-12-31 12
+339 val_339 2008-12-31 12
+0 val_0 2008-12-31 12
+455 val_455 2008-12-31 12
+128 val_128 2008-12-31 12
+311 val_311 2008-12-31 12
+316 val_316 2008-12-31 12
+57 val_57 2008-12-31 12
+302 val_302 2008-12-31 12
+205 val_205 2008-12-31 12
+149 val_149 2008-12-31 12
+438 val_438 2008-12-31 12
+345 val_345 2008-12-31 12
+129 val_129 2008-12-31 12
+170 val_170 2008-12-31 12
+20 val_20 2008-12-31 12
+489 val_489 2008-12-31 12
+157 val_157 2008-12-31 12
+378 val_378 2008-12-31 12
+221 val_221 2008-12-31 12
+92 val_92 2008-12-31 12
+111 val_111 2008-12-31 12
+47 val_47 2008-12-31 12
+72 val_72 2008-12-31 12
+4 val_4 2008-12-31 12
+280 val_280 2008-12-31 12
+35 val_35 2008-12-31 12
+427 val_427 2008-12-31 12
+277 val_277 2008-12-31 12
+208 val_208 2008-12-31 12
+356 val_356 2008-12-31 12
+399 val_399 2008-12-31 12
+169 val_169 2008-12-31 12
+382 val_382 2008-12-31 12
+498 val_498 2008-12-31 12
+125 val_125 2008-12-31 12
+386 val_386 2008-12-31 12
+437 val_437 2008-12-31 12
+469 val_469 2008-12-31 12
+192 val_192 2008-12-31 12
+286 val_286 2008-12-31 12
+187 val_187 2008-12-31 12
+176 val_176 2008-12-31 12
+54 val_54 2008-12-31 12
+459 val_459 2008-12-31 12
+51 val_51 2008-12-31 12
+138 val_138 2008-12-31 12
+103 val_103 2008-12-31 12
+239 val_239 2008-12-31 12
+213 val_213 2008-12-31 12
+216 val_216 2008-12-31 12
+430 val_430 2008-12-31 12
+278 val_278 2008-12-31 12
+176 val_176 2008-12-31 12
+289 val_289 2008-12-31 12
+221 val_221 2008-12-31 12
+65 val_65 2008-12-31 12
+318 val_318 2008-12-31 12
+332 val_332 2008-12-31 12
+311 val_311 2008-12-31 12
+275 val_275 2008-12-31 12
+137 val_137 2008-12-31 12
+241 val_241 2008-12-31 12
+83 val_83 2008-12-31 12
+333 val_333 2008-12-31 12
+180 val_180 2008-12-31 12
+284 val_284 2008-12-31 12
+12 val_12 2008-12-31 12
+230 val_230 2008-12-31 12
+181 val_181 2008-12-31 12
+67 val_67 2008-12-31 12
+260 val_260 2008-12-31 12
+404 val_404 2008-12-31 12
+384 val_384 2008-12-31 12
+489 val_489 2008-12-31 12
+353 val_353 2008-12-31 12
+373 val_373 2008-12-31 12
+272 val_272 2008-12-31 12
+138 val_138 2008-12-31 12
+217 val_217 2008-12-31 12
+84 val_84 2008-12-31 12
+348 val_348 2008-12-31 12
+466 val_466 2008-12-31 12
+58 val_58 2008-12-31 12
+8 val_8 2008-12-31 12
+411 val_411 2008-12-31 12
+230 val_230 2008-12-31 12
+208 val_208 2008-12-31 12
+348 val_348 2008-12-31 12
+24 val_24 2008-12-31 12
+463 val_463 2008-12-31 12
+431 val_431 2008-12-31 12
+179 val_179 2008-12-31 12
+172 val_172 2008-12-31 12
+42 val_42 2008-12-31 12
+129 val_129 2008-12-31 12
+158 val_158 2008-12-31 12
+119 val_119 2008-12-31 12
+496 val_496 2008-12-31 12
+0 val_0 2008-12-31 12
+322 val_322 2008-12-31 12
+197 val_197 2008-12-31 12
+468 val_468 2008-12-31 12
+393 val_393 2008-12-31 12
+454 val_454 2008-12-31 12
+100 val_100 2008-12-31 12
+298 val_298 2008-12-31 12
+199 val_199 2008-12-31 12
+191 val_191 2008-12-31 12
+418 val_418 2008-12-31 12
+96 val_96 2008-12-31 12
+26 val_26 2008-12-31 12
+165 val_165 2008-12-31 12
+327 val_327 2008-12-31 12
+230 val_230 2008-12-31 12
+205 val_205 2008-12-31 12
+120 val_120 2008-12-31 12
+131 val_131 2008-12-31 12
+51 val_51 2008-12-31 12
+404 val_404 2008-12-31 12
+43 val_43 2008-12-31 12
+436 val_436 2008-12-31 12
+156 val_156 2008-12-31 12
+469 val_469 2008-12-31 12
+468 val_468 2008-12-31 12
+308 val_308 2008-12-31 12
+95 val_95 2008-12-31 12
+196 val_196 2008-12-31 12
+288 val_288 2008-12-31 12
+481 val_481 2008-12-31 12
+457 val_457 2008-12-31 12
+98 val_98 2008-12-31 12
+282 val_282 2008-12-31 12
+197 val_197 2008-12-31 12
+187 val_187 2008-12-31 12
+318 val_318 2008-12-31 12
+318 val_318 2008-12-31 12
+409 val_409 2008-12-31 12
+470 val_470 2008-12-31 12
+137 val_137 2008-12-31 12
+369 val_369 2008-12-31 12
+316 val_316 2008-12-31 12
+169 val_169 2008-12-31 12
+413 val_413 2008-12-31 12
+85 val_85 2008-12-31 12
+77 val_77 2008-12-31 12
+0 val_0 2008-12-31 12
+490 val_490 2008-12-31 12
+87 val_87 2008-12-31 12
+364 val_364 2008-12-31 12
+179 val_179 2008-12-31 12
+118 val_118 2008-12-31 12
+134 val_134 2008-12-31 12
+395 val_395 2008-12-31 12
+282 val_282 2008-12-31 12
+138 val_138 2008-12-31 12
+238 val_238 2008-12-31 12
+419 val_419 2008-12-31 12
+15 val_15 2008-12-31 12
+118 val_118 2008-12-31 12
+72 val_72 2008-12-31 12
+90 val_90 2008-12-31 12
+307 val_307 2008-12-31 12
+19 val_19 2008-12-31 12
+435 val_435 2008-12-31 12
+10 val_10 2008-12-31 12
+277 val_277 2008-12-31 12
+273 val_273 2008-12-31 12
+306 val_306 2008-12-31 12
+224 val_224 2008-12-31 12
+309 val_309 2008-12-31 12
+389 val_389 2008-12-31 12
+327 val_327 2008-12-31 12
+242 val_242 2008-12-31 12
+369 val_369 2008-12-31 12
+392 val_392 2008-12-31 12
+272 val_272 2008-12-31 12
+331 val_331 2008-12-31 12
+401 val_401 2008-12-31 12
+242 val_242 2008-12-31 12
+452 val_452 2008-12-31 12
+177 val_177 2008-12-31 12
+226 val_226 2008-12-31 12
+5 val_5 2008-12-31 12
+497 val_497 2008-12-31 12
+402 val_402 2008-12-31 12
+396 val_396 2008-12-31 12
+317 val_317 2008-12-31 12
+395 val_395 2008-12-31 12
+58 val_58 2008-12-31 12
+35 val_35 2008-12-31 12
+336 val_336 2008-12-31 12
+95 val_95 2008-12-31 12
+11 val_11 2008-12-31 12
+168 val_168 2008-12-31 12
+34 val_34 2008-12-31 12
+229 val_229 2008-12-31 12
+233 val_233 2008-12-31 12
+143 val_143 2008-12-31 12
+472 val_472 2008-12-31 12
+322 val_322 2008-12-31 12
+498 val_498 2008-12-31 12
+160 val_160 2008-12-31 12
+195 val_195 2008-12-31 12
+42 val_42 2008-12-31 12
+321 val_321 2008-12-31 12
+430 val_430 2008-12-31 12
+119 val_119 2008-12-31 12
+489 val_489 2008-12-31 12
+458 val_458 2008-12-31 12
+78 val_78 2008-12-31 12
+76 val_76 2008-12-31 12
+41 val_41 2008-12-31 12
+223 val_223 2008-12-31 12
+492 val_492 2008-12-31 12
+149 val_149 2008-12-31 12
+449 val_449 2008-12-31 12
+218 val_218 2008-12-31 12
+228 val_228 2008-12-31 12
+138 val_138 2008-12-31 12
+453 val_453 2008-12-31 12
+30 val_30 2008-12-31 12
+209 val_209 2008-12-31 12
+64 val_64 2008-12-31 12
+468 val_468 2008-12-31 12
+76 val_76 2008-12-31 12
+74 val_74 2008-12-31 12
+342 val_342 2008-12-31 12
+69 val_69 2008-12-31 12
+230 val_230 2008-12-31 12
+33 val_33 2008-12-31 12
+368 val_368 2008-12-31 12
+103 val_103 2008-12-31 12
+296 val_296 2008-12-31 12
+113 val_113 2008-12-31 12
+216 val_216 2008-12-31 12
+367 val_367 2008-12-31 12
+344 val_344 2008-12-31 12
+167 val_167 2008-12-31 12
+274 val_274 2008-12-31 12
+219 val_219 2008-12-31 12
+239 val_239 2008-12-31 12
+485 val_485 2008-12-31 12
+116 val_116 2008-12-31 12
+223 val_223 2008-12-31 12
+256 val_256 2008-12-31 12
+263 val_263 2008-12-31 12
+70 val_70 2008-12-31 12
+487 val_487 2008-12-31 12
+480 val_480 2008-12-31 12
+401 val_401 2008-12-31 12
+288 val_288 2008-12-31 12
+191 val_191 2008-12-31 12
+5 val_5 2008-12-31 12
+244 val_244 2008-12-31 12
+438 val_438 2008-12-31 12
+128 val_128 2008-12-31 12
+467 val_467 2008-12-31 12
+432 val_432 2008-12-31 12
+202 val_202 2008-12-31 12
+316 val_316 2008-12-31 12
+229 val_229 2008-12-31 12
+469 val_469 2008-12-31 12
+463 val_463 2008-12-31 12
+280 val_280 2008-12-31 12
+2 val_2 2008-12-31 12
+35 val_35 2008-12-31 12
+283 val_283 2008-12-31 12
+331 val_331 2008-12-31 12
+235 val_235 2008-12-31 12
+80 val_80 2008-12-31 12
+44 val_44 2008-12-31 12
+193 val_193 2008-12-31 12
+321 val_321 2008-12-31 12
+335 val_335 2008-12-31 12
+104 val_104 2008-12-31 12
+466 val_466 2008-12-31 12
+366 val_366 2008-12-31 12
+175 val_175 2008-12-31 12
+403 val_403 2008-12-31 12
+483 val_483 2008-12-31 12
+53 val_53 2008-12-31 12
+105 val_105 2008-12-31 12
+257 val_257 2008-12-31 12
+406 val_406 2008-12-31 12
+409 val_409 2008-12-31 12
+190 val_190 2008-12-31 12
+406 val_406 2008-12-31 12
+401 val_401 2008-12-31 12
+114 val_114 2008-12-31 12
+258 val_258 2008-12-31 12
+90 val_90 2008-12-31 12
+203 val_203 2008-12-31 12
+262 val_262 2008-12-31 12
+348 val_348 2008-12-31 12
+424 val_424 2008-12-31 12
+12 val_12 2008-12-31 12
+396 val_396 2008-12-31 12
+201 val_201 2008-12-31 12
+217 val_217 2008-12-31 12
+164 val_164 2008-12-31 12
+431 val_431 2008-12-31 12
+454 val_454 2008-12-31 12
+478 val_478 2008-12-31 12
+298 val_298 2008-12-31 12
+125 val_125 2008-12-31 12
+431 val_431 2008-12-31 12
+164 val_164 2008-12-31 12
+424 val_424 2008-12-31 12
+187 val_187 2008-12-31 12
+382 val_382 2008-12-31 12
+5 val_5 2008-12-31 12
+70 val_70 2008-12-31 12
+397 val_397 2008-12-31 12
+480 val_480 2008-12-31 12
+291 val_291 2008-12-31 12
+24 val_24 2008-12-31 12
+351 val_351 2008-12-31 12
+255 val_255 2008-12-31 12
+104 val_104 2008-12-31 12
+70 val_70 2008-12-31 12
+163 val_163 2008-12-31 12
+438 val_438 2008-12-31 12
+119 val_119 2008-12-31 12
+414 val_414 2008-12-31 12
+200 val_200 2008-12-31 12
+491 val_491 2008-12-31 12
+237 val_237 2008-12-31 12
+439 val_439 2008-12-31 12
+360 val_360 2008-12-31 12
+248 val_248 2008-12-31 12
+479 val_479 2008-12-31 12
+305 val_305 2008-12-31 12
+417 val_417 2008-12-31 12
+199 val_199 2008-12-31 12
+444 val_444 2008-12-31 12
+120 val_120 2008-12-31 12
+429 val_429 2008-12-31 12
+169 val_169 2008-12-31 12
+443 val_443 2008-12-31 12
+323 val_323 2008-12-31 12
+325 val_325 2008-12-31 12
+277 val_277 2008-12-31 12
+230 val_230 2008-12-31 12
+478 val_478 2008-12-31 12
+178 val_178 2008-12-31 12
+468 val_468 2008-12-31 12
+310 val_310 2008-12-31 12
+317 val_317 2008-12-31 12
+333 val_333 2008-12-31 12
+493 val_493 2008-12-31 12
+460 val_460 2008-12-31 12
+207 val_207 2008-12-31 12
+249 val_249 2008-12-31 12
+265 val_265 2008-12-31 12
+480 val_480 2008-12-31 12
+83 val_83 2008-12-31 12
+136 val_136 2008-12-31 12
+353 val_353 2008-12-31 12
+172 val_172 2008-12-31 12
+214 val_214 2008-12-31 12
+462 val_462 2008-12-31 12
+233 val_233 2008-12-31 12
+406 val_406 2008-12-31 12
+133 val_133 2008-12-31 12
+175 val_175 2008-12-31 12
+189 val_189 2008-12-31 12
+454 val_454 2008-12-31 12
+375 val_375 2008-12-31 12
+401 val_401 2008-12-31 12
+421 val_421 2008-12-31 12
+407 val_407 2008-12-31 12
+384 val_384 2008-12-31 12
+256 val_256 2008-12-31 12
+26 val_26 2008-12-31 12
+134 val_134 2008-12-31 12
+67 val_67 2008-12-31 12
+384 val_384 2008-12-31 12
+379 val_379 2008-12-31 12
+18 val_18 2008-12-31 12
+462 val_462 2008-12-31 12
+492 val_492 2008-12-31 12
+100 val_100 2008-12-31 12
+298 val_298 2008-12-31 12
+9 val_9 2008-12-31 12
+341 val_341 2008-12-31 12
+498 val_498 2008-12-31 12
+146 val_146 2008-12-31 12
+458 val_458 2008-12-31 12
+362 val_362 2008-12-31 12
+186 val_186 2008-12-31 12
+285 val_285 2008-12-31 12
+348 val_348 2008-12-31 12
+167 val_167 2008-12-31 12
+18 val_18 2008-12-31 12
+273 val_273 2008-12-31 12
+183 val_183 2008-12-31 12
+281 val_281 2008-12-31 12
+344 val_344 2008-12-31 12
+97 val_97 2008-12-31 12
+469 val_469 2008-12-31 12
+315 val_315 2008-12-31 12
+84 val_84 2008-12-31 12
+28 val_28 2008-12-31 12
+37 val_37 2008-12-31 12
+448 val_448 2008-12-31 12
+152 val_152 2008-12-31 12
+348 val_348 2008-12-31 12
+307 val_307 2008-12-31 12
+194 val_194 2008-12-31 12
+414 val_414 2008-12-31 12
+477 val_477 2008-12-31 12
+222 val_222 2008-12-31 12
+126 val_126 2008-12-31 12
+90 val_90 2008-12-31 12
+169 val_169 2008-12-31 12
+403 val_403 2008-12-31 12
+400 val_400 2008-12-31 12
+200 val_200 2008-12-31 12
+97 val_97 2008-12-31 12
+PREHOOK: query: describe extended nzhang_part1 partition(ds='2008-04-08',hr=11)
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended nzhang_part1 partition(ds='2008-04-08',hr=11)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+key string default
+value string default
+ds string
+hr string
+
+Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:nzhang_part1, createTime:1281384647, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/nzhang_part1/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1281384649, numRows=500, totalSize=5812})
+PREHOOK: query: describe extended nzhang_part1 partition(ds='2008-04-08',hr=12)
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended nzhang_part1 partition(ds='2008-04-08',hr=12)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+key string default
+value string default
+ds string
+hr string
+
+Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:nzhang_part1, createTime:1281384648, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/nzhang_part1/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1281384649, numRows=500, totalSize=5812})
+PREHOOK: query: describe extended nzhang_part2 partition(ds='2008-12-31',hr=11)
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended nzhang_part2 partition(ds='2008-12-31',hr=11)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+key string default
+value string default
+ds string
+hr string
+
+Detailed Partition Information Partition(values:[2008-12-31, 11], dbName:default, tableName:nzhang_part2, createTime:1281384648, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/nzhang_part2/ds=2008-12-31/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1281384649, numRows=500, totalSize=5812})
+PREHOOK: query: describe extended nzhang_part2 partition(ds='2008-12-31',hr=12)
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended nzhang_part2 partition(ds='2008-12-31',hr=12)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+key string default
+value string default
+ds string
+hr string
+
+Detailed Partition Information Partition(values:[2008-12-31, 12], dbName:default, tableName:nzhang_part2, createTime:1281384648, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/nzhang_part2/ds=2008-12-31/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1281384649, numRows=500, totalSize=5812})
+PREHOOK: query: describe extended nzhang_part1
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended nzhang_part1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+key string default
+value string default
+ds string
+hr string
+
+Detailed Table Information Table(tableName:nzhang_part1, dbName:default, owner:null, createTime:1281384642, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/nzhang_part1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=2, EXTERNAL=FALSE, numFiles=2, transient_lastDdlTime=1281384649, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+PREHOOK: query: describe extended nzhang_part2
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended nzhang_part2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+key string default
+value string default
+ds string
+hr string
+
+Detailed Table Information Table(tableName:nzhang_part2, dbName:default, owner:null, createTime:1281384642, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/nzhang_part2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=2, EXTERNAL=FALSE, numFiles=2, transient_lastDdlTime=1281384649, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+PREHOOK: query: drop table nzhang_part1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table nzhang_part1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@nzhang_part1
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+PREHOOK: query: drop table nzhang_part2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table nzhang_part2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@nzhang_part2
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
Index: ql/src/test/results/clientpositive/stats5.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats5.q.out (revision 0)
+++ ql/src/test/results/clientpositive/stats5.q.out (revision 0)
@@ -0,0 +1,37 @@
+PREHOOK: query: explain analyze table src compute statistics
+PREHOOK: type: null
+POSTHOOK: query: explain analyze table src compute statistics
+POSTHOOK: type: null
+ABSTRACT SYNTAX TREE:
+ (TOK_ANALYZE (TOK_TABTYPE src))
+
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+ Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-0
+ Map Reduce
+ Alias -> Map Operator Tree:
+ src
+ TableScan
+ alias: src
+
+ Stage: Stage-1
+ Stats Operator
+
+
+PREHOOK: query: analyze table src compute statistics
+PREHOOK: type: null
+PREHOOK: Input: default@src
+POSTHOOK: query: analyze table src compute statistics
+POSTHOOK: type: null
+POSTHOOK: Input: default@src
+PREHOOK: query: describe extended src
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended src
+POSTHOOK: type: DESCTABLE
+key string default
+value string default
+
+Detailed Table Information Table(tableName:src, dbName:default, owner:null, createTime:1281386650, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/src, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1281386656, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
Index: ql/src/test/results/clientpositive/stats6.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats6.q.out (revision 0)
+++ ql/src/test/results/clientpositive/stats6.q.out (revision 0)
@@ -0,0 +1,16 @@
+PREHOOK: query: analyze table srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics
+PREHOOK: type: null
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: query: analyze table srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics
+POSTHOOK: type: null
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: query: describe extended srcpart PARTITION(ds='2008-04-08',hr=11)
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended srcpart PARTITION(ds='2008-04-08',hr=11)
+POSTHOOK: type: DESCTABLE
+key string default
+value string default
+ds string
+hr string
+
+Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:srcpart, createTime:1280871222, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1280871231, numRows=500, totalSize=4096})
Index: ql/src/test/results/clientpositive/stats1.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats1.q.out (revision 0)
+++ ql/src/test/results/clientpositive/stats1.q.out (revision 0)
@@ -0,0 +1,209 @@
+PREHOOK: query: -- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by filesink
+
+drop table tmptable
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by filesink
+
+drop table tmptable
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table tmptable(key string, value string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table tmptable(key string, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@tmptable
+PREHOOK: query: explain
+insert overwrite table tmptable
+ select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
+ UNION ALL
+ select s2.key as key, s2.value as value from src1 s2) unionsrc
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table tmptable
+ select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
+ UNION ALL
+ select s2.key as key, s2.value as value from src1 s2) unionsrc
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+ (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF src s1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR 'tst1' key) (TOK_SELEXPR (TOK_FUNCTION TOK_STRING (TOK_FUNCTION count 1)) value)))) (TOK_QUERY (TOK_FROM (TOK_TABREF src1 s2)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL s2) key) key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL s2) value) value))))) unionsrc)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB tmptable)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL unionsrc) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL unionsrc) value)))))
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1, Stage-4
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-4 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Alias -> Map Operator Tree:
+ null-subquery1:unionsrc-subquery1:s1
+ TableScan
+ alias: s1
+ Select Operator
+ Group By Operator
+ aggregations:
+ expr: count(1)
+ bucketGroup: false
+ mode: hash
+ outputColumnNames: _col0
+ Reduce Output Operator
+ sort order:
+ tag: -1
+ value expressions:
+ expr: _col0
+ type: bigint
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations:
+ expr: count(VALUE._col0)
+ bucketGroup: false
+ mode: mergepartial
+ outputColumnNames: _col0
+ Select Operator
+ expressions:
+ expr: 'tst1'
+ type: string
+ expr: UDFToString(_col0)
+ type: string
+ outputColumnNames: _col0, _col1
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+ Stage: Stage-2
+ Map Reduce
+ Alias -> Map Operator Tree:
+ file:/tmp/aaly/hive_2010-08-09_13-32-25_295_4586645755203915221/-mr-10002
+ Union
+ Select Operator
+ expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ outputColumnNames: _col0, _col1
+ File Output Operator
+ compressed: false
+ GlobalTableId: 1
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: tmptable
+ file:/tmp/aaly/hive_2010-08-09_13-32-25_295_4586645755203915221/-mr-10003
+ Union
+ Select Operator
+ expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ outputColumnNames: _col0, _col1
+ File Output Operator
+ compressed: false
+ GlobalTableId: 1
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: tmptable
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: tmptable
+
+ Stage: Stage-3
+ Stats Operator
+
+ Stage: Stage-4
+ Map Reduce
+ Alias -> Map Operator Tree:
+ null-subquery2:unionsrc-subquery2:s2
+ TableScan
+ alias: s2
+ Select Operator
+ expressions:
+ expr: key
+ type: string
+ expr: value
+ type: string
+ outputColumnNames: _col0, _col1
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+
+PREHOOK: query: insert overwrite table tmptable
+select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
+ UNION ALL
+ select s2.key as key, s2.value as value from src1 s2) unionsrc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@tmptable
+POSTHOOK: query: insert overwrite table tmptable
+select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
+ UNION ALL
+ select s2.key as key, s2.value as value from src1 s2) unionsrc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@tmptable
+POSTHOOK: Lineage: tmptable.key EXPRESSION [(src1)s2.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tmptable.value EXPRESSION [(src)s1.null, (src1)s2.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from tmptable x sort by x.key, x.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tmptable
+PREHOOK: Output: file:/tmp/aaly/hive_2010-08-09_13-32-34_786_8877460730892721060/-mr-10000
+POSTHOOK: query: select * from tmptable x sort by x.key, x.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tmptable
+POSTHOOK: Output: file:/tmp/aaly/hive_2010-08-09_13-32-34_786_8877460730892721060/-mr-10000
+POSTHOOK: Lineage: tmptable.key EXPRESSION [(src1)s2.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tmptable.value EXPRESSION [(src)s1.null, (src1)s2.FieldSchema(name:value, type:string, comment:default), ]
+
+
+
+
+ val_165
+ val_193
+ val_265
+ val_27
+ val_409
+ val_484
+128
+146 val_146
+150 val_150
+213 val_213
+224
+238 val_238
+255 val_255
+273 val_273
+278 val_278
+311 val_311
+369
+401 val_401
+406 val_406
+66 val_66
+98 val_98
+tst1 500
+PREHOOK: query: drop table tmptable
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table tmptable
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@tmptable
+POSTHOOK: Lineage: tmptable.key EXPRESSION [(src1)s2.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tmptable.value EXPRESSION [(src)s1.null, (src1)s2.FieldSchema(name:value, type:string, comment:default), ]
Index: ql/src/test/results/clientpositive/stats10.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats10.q.out (revision 0)
+++ ql/src/test/results/clientpositive/stats10.q.out (revision 0)
@@ -0,0 +1,442 @@
+PREHOOK: query: CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@bucket3_1
+PREHOOK: query: explain
+insert overwrite table bucket3_1 partition (ds='1')
+select * from src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table bucket3_1 partition (ds='1')
+select * from src
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+ (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB bucket3_1 (TOK_PARTSPEC (TOK_PARTVAL ds '1')))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+ Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Alias -> Map Operator Tree:
+ src
+ TableScan
+ alias: src
+ Select Operator
+ expressions:
+ expr: key
+ type: string
+ expr: value
+ type: string
+ outputColumnNames: _col0, _col1
+ Reduce Output Operator
+ sort order:
+ Map-reduce partition columns:
+ expr: UDFToInteger(_col0)
+ type: int
+ tag: -1
+ value expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ Reduce Operator Tree:
+ Extract
+ Select Operator
+ expressions:
+ expr: UDFToInteger(_col0)
+ type: int
+ expr: _col1
+ type: string
+ outputColumnNames: _col0, _col1
+ File Output Operator
+ compressed: false
+ GlobalTableId: 1
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: bucket3_1
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds 1
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: bucket3_1
+
+ Stage: Stage-2
+ Stats Operator
+
+
+PREHOOK: query: insert overwrite table bucket3_1 partition (ds='1')
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucket3_1@ds=1
+POSTHOOK: query: insert overwrite table bucket3_1 partition (ds='1')
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucket3_1@ds=1
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table bucket3_1 partition (ds='1')
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucket3_1@ds=1
+POSTHOOK: query: insert overwrite table bucket3_1 partition (ds='1')
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucket3_1@ds=1
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table bucket3_1 partition (ds='2')
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucket3_1@ds=2
+POSTHOOK: query: insert overwrite table bucket3_1 partition (ds='2')
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucket3_1@ds=2
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket3_1@ds=1
+PREHOOK: Output: file:/tmp/aaly/hive_2010-08-09_13-51-37_053_514290142284583234/-mr-10000
+POSTHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket3_1@ds=1
+POSTHOOK: Output: file:/tmp/aaly/hive_2010-08-09_13-51-37_053_514290142284583234/-mr-10000
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+0 val_0 1
+0 val_0 1
+0 val_0 1
+2 val_2 1
+4 val_4 1
+8 val_8 1
+10 val_10 1
+12 val_12 1
+12 val_12 1
+18 val_18 1
+18 val_18 1
+20 val_20 1
+24 val_24 1
+24 val_24 1
+26 val_26 1
+26 val_26 1
+28 val_28 1
+30 val_30 1
+34 val_34 1
+42 val_42 1
+42 val_42 1
+44 val_44 1
+54 val_54 1
+58 val_58 1
+58 val_58 1
+64 val_64 1
+66 val_66 1
+70 val_70 1
+70 val_70 1
+70 val_70 1
+72 val_72 1
+72 val_72 1
+74 val_74 1
+76 val_76 1
+76 val_76 1
+78 val_78 1
+80 val_80 1
+82 val_82 1
+84 val_84 1
+84 val_84 1
+86 val_86 1
+90 val_90 1
+90 val_90 1
+90 val_90 1
+92 val_92 1
+96 val_96 1
+98 val_98 1
+98 val_98 1
+100 val_100 1
+100 val_100 1
+104 val_104 1
+104 val_104 1
+114 val_114 1
+116 val_116 1
+118 val_118 1
+118 val_118 1
+120 val_120 1
+120 val_120 1
+126 val_126 1
+128 val_128 1
+128 val_128 1
+128 val_128 1
+134 val_134 1
+134 val_134 1
+136 val_136 1
+138 val_138 1
+138 val_138 1
+138 val_138 1
+138 val_138 1
+146 val_146 1
+146 val_146 1
+150 val_150 1
+152 val_152 1
+152 val_152 1
+156 val_156 1
+158 val_158 1
+160 val_160 1
+162 val_162 1
+164 val_164 1
+164 val_164 1
+166 val_166 1
+168 val_168 1
+170 val_170 1
+172 val_172 1
+172 val_172 1
+174 val_174 1
+174 val_174 1
+176 val_176 1
+176 val_176 1
+178 val_178 1
+180 val_180 1
+186 val_186 1
+190 val_190 1
+192 val_192 1
+194 val_194 1
+196 val_196 1
+200 val_200 1
+200 val_200 1
+202 val_202 1
+208 val_208 1
+208 val_208 1
+208 val_208 1
+214 val_214 1
+216 val_216 1
+216 val_216 1
+218 val_218 1
+222 val_222 1
+224 val_224 1
+224 val_224 1
+226 val_226 1
+228 val_228 1
+230 val_230 1
+230 val_230 1
+230 val_230 1
+230 val_230 1
+230 val_230 1
+238 val_238 1
+238 val_238 1
+242 val_242 1
+242 val_242 1
+244 val_244 1
+248 val_248 1
+252 val_252 1
+256 val_256 1
+256 val_256 1
+258 val_258 1
+260 val_260 1
+262 val_262 1
+266 val_266 1
+272 val_272 1
+272 val_272 1
+274 val_274 1
+278 val_278 1
+278 val_278 1
+280 val_280 1
+280 val_280 1
+282 val_282 1
+282 val_282 1
+284 val_284 1
+286 val_286 1
+288 val_288 1
+288 val_288 1
+292 val_292 1
+296 val_296 1
+298 val_298 1
+298 val_298 1
+298 val_298 1
+302 val_302 1
+306 val_306 1
+308 val_308 1
+310 val_310 1
+316 val_316 1
+316 val_316 1
+316 val_316 1
+318 val_318 1
+318 val_318 1
+318 val_318 1
+322 val_322 1
+322 val_322 1
+332 val_332 1
+336 val_336 1
+338 val_338 1
+342 val_342 1
+342 val_342 1
+344 val_344 1
+344 val_344 1
+348 val_348 1
+348 val_348 1
+348 val_348 1
+348 val_348 1
+348 val_348 1
+356 val_356 1
+360 val_360 1
+362 val_362 1
+364 val_364 1
+366 val_366 1
+368 val_368 1
+374 val_374 1
+378 val_378 1
+382 val_382 1
+382 val_382 1
+384 val_384 1
+384 val_384 1
+384 val_384 1
+386 val_386 1
+392 val_392 1
+394 val_394 1
+396 val_396 1
+396 val_396 1
+396 val_396 1
+400 val_400 1
+402 val_402 1
+404 val_404 1
+404 val_404 1
+406 val_406 1
+406 val_406 1
+406 val_406 1
+406 val_406 1
+414 val_414 1
+414 val_414 1
+418 val_418 1
+424 val_424 1
+424 val_424 1
+430 val_430 1
+430 val_430 1
+430 val_430 1
+432 val_432 1
+436 val_436 1
+438 val_438 1
+438 val_438 1
+438 val_438 1
+444 val_444 1
+446 val_446 1
+448 val_448 1
+452 val_452 1
+454 val_454 1
+454 val_454 1
+454 val_454 1
+458 val_458 1
+458 val_458 1
+460 val_460 1
+462 val_462 1
+462 val_462 1
+466 val_466 1
+466 val_466 1
+466 val_466 1
+468 val_468 1
+468 val_468 1
+468 val_468 1
+468 val_468 1
+470 val_470 1
+472 val_472 1
+478 val_478 1
+478 val_478 1
+480 val_480 1
+480 val_480 1
+480 val_480 1
+482 val_482 1
+484 val_484 1
+490 val_490 1
+492 val_492 1
+492 val_492 1
+494 val_494 1
+496 val_496 1
+498 val_498 1
+498 val_498 1
+498 val_498 1
+PREHOOK: query: analyze table bucket3_1 compute statistics
+PREHOOK: type: null
+PREHOOK: Input: default@bucket3_1@ds=1
+PREHOOK: Input: default@bucket3_1@ds=2
+POSTHOOK: query: analyze table bucket3_1 compute statistics
+POSTHOOK: type: null
+POSTHOOK: Input: default@bucket3_1@ds=1
+POSTHOOK: Input: default@bucket3_1@ds=2
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe extended bucket3_1
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended bucket3_1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+key int
+value string
+ds string
+
+Detailed Table Information Table(tableName:bucket3_1, dbName:default, owner:aaly, createTime:1281387084, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/bucket3_1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=2, numFiles=4, transient_lastDdlTime=1281387103, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+PREHOOK: query: describe extended bucket3_1 partition (ds='1')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended bucket3_1 partition (ds='1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+key int
+value string
+ds string
+
+Detailed Partition Information Partition(values:[1], dbName:default, tableName:bucket3_1, createTime:1281387088, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/bucket3_1/ds=1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), parameters:{numFiles=2, transient_lastDdlTime=1281387103, numRows=500, totalSize=5812})
+PREHOOK: query: describe extended bucket3_1 partition (ds='2')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended bucket3_1 partition (ds='2')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+key int
+value string
+ds string
+
+Detailed Partition Information Partition(values:[2], dbName:default, tableName:bucket3_1, createTime:1281387096, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/bucket3_1/ds=2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), parameters:{numFiles=2, transient_lastDdlTime=1281387103, numRows=500, totalSize=5812})
Index: ql/src/test/results/clientpositive/stats7.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats7.q.out (revision 0)
+++ ql/src/test/results/clientpositive/stats7.q.out (revision 0)
@@ -0,0 +1,28 @@
+PREHOOK: query: analyze table srcpart PARTITION(ds='2008-04-08',hr) compute statistics
+PREHOOK: type: null
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: query: analyze table srcpart PARTITION(ds='2008-04-08',hr) compute statistics
+POSTHOOK: type: null
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: query: describe extended srcpart PARTITION(ds='2008-04-08',hr=11)
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended srcpart PARTITION(ds='2008-04-08',hr=11)
+POSTHOOK: type: DESCTABLE
+key string default
+value string default
+ds string
+hr string
+
+Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:srcpart, createTime:1280871291, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1280871301, numRows=500, totalSize=4096})
+PREHOOK: query: describe extended srcpart PARTITION(ds='2008-04-08',hr=12)
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe extended srcpart PARTITION(ds='2008-04-08',hr=12)
+POSTHOOK: type: DESCTABLE
+key string default
+value string default
+ds string
+hr string
+
+Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:srcpart, createTime:1280871292, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/aaly/work/apache-hive/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1280871301, numRows=500, totalSize=4096})
Index: ql/src/test/results/clientpositive/stats2.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats2.q.out (revision 0)
+++ ql/src/test/results/clientpositive/stats2.q.out (revision 0)
@@ -0,0 +1,12 @@
+PREHOOK: query: analyze table srcpart compute statistics
+PREHOOK: type: null
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: query: analyze table srcpart compute statistics
+POSTHOOK: type: null
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
Index: ql/src/test/queries/clientpositive/stats8.q
===================================================================
--- ql/src/test/queries/clientpositive/stats8.q (revision 0)
+++ ql/src/test/queries/clientpositive/stats8.q (revision 0)
@@ -0,0 +1,6 @@
+analyze table srcpart PARTITION(ds, hr) compute statistics;
+
+describe extended srcpart PARTITION(ds='2008-04-08',hr=11);
+describe extended srcpart PARTITION(ds='2008-04-09',hr=12);
+describe extended srcpart PARTITION(ds='2008-04-08',hr=11);
+describe extended srcpart PARTITION(ds='2008-04-09',hr=12);
\ No newline at end of file
Index: ql/src/test/queries/clientpositive/stats9.q
===================================================================
--- ql/src/test/queries/clientpositive/stats9.q (revision 0)
+++ ql/src/test/queries/clientpositive/stats9.q (revision 0)
@@ -0,0 +1,5 @@
+analyze table srcbucket compute statistics;
+describe extended srcbucket;
+
+analyze table srcbucket2 compute statistics;
+describe extended srcbucket2;
\ No newline at end of file
Index: ql/src/test/queries/clientpositive/stats1.q
===================================================================
--- ql/src/test/queries/clientpositive/stats1.q (revision 0)
+++ ql/src/test/queries/clientpositive/stats1.q (revision 0)
@@ -0,0 +1,24 @@
+
+set hive.merge.mapfiles=false;
+set hive.merge.mapredfiles=false;
+set hive.map.aggr = true;
+
+-- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by filesink
+
+drop table tmptable;
+create table tmptable(key string, value string);
+
+explain
+insert overwrite table tmptable
+ select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
+ UNION ALL
+ select s2.key as key, s2.value as value from src1 s2) unionsrc;
+
+insert overwrite table tmptable
+select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
+ UNION ALL
+ select s2.key as key, s2.value as value from src1 s2) unionsrc;
+
+select * from tmptable x sort by x.key, x.value;
+
+drop table tmptable;
\ No newline at end of file
Index: ql/src/test/queries/clientpositive/stats10.q
===================================================================
--- ql/src/test/queries/clientpositive/stats10.q (revision 0)
+++ ql/src/test/queries/clientpositive/stats10.q (revision 0)
@@ -0,0 +1,26 @@
+set hive.enforce.bucketing = true;
+set hive.exec.reducers.max = 1;
+
+CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS;
+
+explain
+insert overwrite table bucket3_1 partition (ds='1')
+select * from src;
+
+insert overwrite table bucket3_1 partition (ds='1')
+select * from src;
+
+insert overwrite table bucket3_1 partition (ds='1')
+select * from src;
+
+insert overwrite table bucket3_1 partition (ds='2')
+select * from src;
+
+select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key;
+
+analyze table bucket3_1 compute statistics;
+
+
+describe extended bucket3_1;
+describe extended bucket3_1 partition (ds='1');
+describe extended bucket3_1 partition (ds='2');
\ No newline at end of file
Index: ql/src/test/queries/clientpositive/stats2.q
===================================================================
--- ql/src/test/queries/clientpositive/stats2.q (revision 0)
+++ ql/src/test/queries/clientpositive/stats2.q (revision 0)
@@ -0,0 +1,3 @@
+
+
+analyze table srcpart compute statistics;
Index: ql/src/test/queries/clientpositive/stats3.q
===================================================================
--- ql/src/test/queries/clientpositive/stats3.q (revision 0)
+++ ql/src/test/queries/clientpositive/stats3.q (revision 0)
@@ -0,0 +1,23 @@
+drop table hive_test_src;
+drop table hive_test_dst;
+
+create table hive_test_src ( col1 string ) stored as textfile ;
+load data local inpath '../data/files/test.dat' overwrite into table hive_test_src ;
+
+create table hive_test_dst ( col1 string ) partitioned by ( pcol1 string , pcol2 string) stored as sequencefile;
+insert overwrite table hive_test_dst partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src ;
+select * from hive_test_dst where pcol1='test_part' and pcol2='test_Part';
+
+select count(1) from hive_test_dst;
+
+insert overwrite table hive_test_dst partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src ;
+select * from hive_test_dst where pcol1='test_part' and pcol2='test_part';
+
+select count(1) from hive_test_dst;
+
+select * from hive_test_dst where pcol1='test_part';
+select * from hive_test_dst where pcol1='test_part' and pcol2='test_part';
+select * from hive_test_dst where pcol1='test_Part';
+
+drop table hive_test_src;
+drop table hive_test_dst;
\ No newline at end of file
Index: ql/src/test/queries/clientpositive/stats4.q
===================================================================
--- ql/src/test/queries/clientpositive/stats4.q (revision 0)
+++ ql/src/test/queries/clientpositive/stats4.q (revision 0)
@@ -0,0 +1,38 @@
+show partitions srcpart;
+
+drop table nzhang_part1;
+drop table nzhang_part2;
+
+create table if not exists nzhang_part1 like srcpart;
+create table if not exists nzhang_part2 like srcpart;
+
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.exec.dynamic.partition=true;
+set hive.stats.autogather=true;
+
+explain
+from srcpart
+insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08';
+
+from srcpart
+insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08';
+
+
+show partitions nzhang_part1;
+show partitions nzhang_part2;
+
+select * from nzhang_part1 where ds is not null and hr is not null;
+select * from nzhang_part2 where ds is not null and hr is not null;
+
+describe extended nzhang_part1 partition(ds='2008-04-08',hr=11);
+describe extended nzhang_part1 partition(ds='2008-04-08',hr=12);
+describe extended nzhang_part2 partition(ds='2008-12-31',hr=11);
+describe extended nzhang_part2 partition(ds='2008-12-31',hr=12);
+
+describe extended nzhang_part1;
+describe extended nzhang_part2;
+
+drop table nzhang_part1;
+drop table nzhang_part2;
\ No newline at end of file
Index: ql/src/test/queries/clientpositive/stats5.q
===================================================================
--- ql/src/test/queries/clientpositive/stats5.q (revision 0)
+++ ql/src/test/queries/clientpositive/stats5.q (revision 0)
@@ -0,0 +1,7 @@
+
+
+explain analyze table src compute statistics;
+
+analyze table src compute statistics;
+
+describe extended src;
\ No newline at end of file
Index: ql/src/test/queries/clientpositive/stats6.q
===================================================================
--- ql/src/test/queries/clientpositive/stats6.q (revision 0)
+++ ql/src/test/queries/clientpositive/stats6.q (revision 0)
@@ -0,0 +1,3 @@
+analyze table srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics;
+
+describe extended srcpart PARTITION(ds='2008-04-08',hr=11);
\ No newline at end of file
Index: ql/src/test/queries/clientpositive/stats7.q
===================================================================
--- ql/src/test/queries/clientpositive/stats7.q (revision 0)
+++ ql/src/test/queries/clientpositive/stats7.q (revision 0)
@@ -0,0 +1,4 @@
+analyze table srcpart PARTITION(ds='2008-04-08',hr) compute statistics;
+
+describe extended srcpart PARTITION(ds='2008-04-08',hr=11);
+describe extended srcpart PARTITION(ds='2008-04-08',hr=12);
\ No newline at end of file
Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java (revision 982011)
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java (working copy)
@@ -32,6 +32,8 @@
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.StatsWork;
/**
* Processor for the rule - table scan.
@@ -54,12 +56,12 @@
GenMRProcContext ctx = (GenMRProcContext) opProcCtx;
ParseContext parseCtx = ctx.getParseCtx();
Map<Operator<? extends Serializable>, GenMapRedCtx> mapCurrCtx = ctx
- .getMapCurrCtx();
+ .getMapCurrCtx();
// create a dummy task
Task<? extends Serializable> currTask =
TaskFactory.get(GenMapRedUtils.getMapRedWork(parseCtx.getConf()),
- parseCtx.getConf());
+ parseCtx.getConf());
Operator<? extends Serializable> currTopOp = op;
ctx.setCurrTask(currTask);
ctx.setCurrTopOp(currTopOp);
@@ -70,6 +72,14 @@
String currAliasId = alias;
ctx.setCurrAliasId(currAliasId);
mapCurrCtx.put(op, new GenMapRedCtx(currTask, currTopOp, currAliasId));
+
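+ // For an ANALYZE TABLE ... COMPUTE STATISTICS command, chain a StatsTask
+ // after the map-reduce scan task so the statistics gathered during the
+ // scan are aggregated and persisted once the scan completes.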
+ if (parseCtx.getQB().getParseInfo().isAnalyzeCommand()) {
+ StatsWork statsWork = new StatsWork(parseCtx.getQB().getParseInfo().getTableSpec());
+ Task<? extends Serializable> statsTask = TaskFactory.get(statsWork, parseCtx.getConf());
+ currTask.addDependentTask(statsTask);
+ ctx.getRootTasks().add(currTask);
+ GenMapRedUtils.setTaskPlan(currAliasId, currTopOp, (MapredWork) currTask.getWork(), false, ctx);
+ }
return null;
}
}
Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java (revision 982011)
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java (working copy)
@@ -38,7 +38,6 @@
import org.apache.hadoop.hive.ql.exec.RowSchema;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
-import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.exec.UnionOperator;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.lib.Node;
@@ -51,6 +50,7 @@
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory;
import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles;
+import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles.ConditionalResolverMergeFilesCtx;
import org.apache.hadoop.hive.ql.plan.ConditionalWork;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -63,9 +63,9 @@
import org.apache.hadoop.hive.ql.plan.PartitionDesc;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
+import org.apache.hadoop.hive.ql.plan.StatsWork;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.plan.TableScanDesc;
-import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles.ConditionalResolverMergeFilesCtx;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
/**
@@ -104,16 +104,28 @@
// no need of merging if the move is to a local file system
MoveTask mvTask = (MoveTask) findMoveTask(mvTasks,
(FileSinkOperator) nd);
+
+ // Add the stats task as a dependent task of the move task
+ if (((FileSinkOperator) nd).getConf().getTableInfo().getTableName() != null
+     && ctx.getParseCtx().getQB().getParseInfo().isInsertToTable()) {
+ ((FileSinkOperator) nd).getConf().setGatherStats(true);
+ MoveWork mvWork = mvTask.getWork();
+ StatsWork statsWork = new StatsWork(mvWork.getLoadTableWork());
+ Task<? extends Serializable> statsTask = TaskFactory.get(statsWork, ctx.getParseCtx().getConf());
+ mvTask.addDependentTask(statsTask);
+ }
+
if ((mvTask != null) && !mvTask.isLocal()) {
// There are separate configuration parameters to control whether to
// merge for a map-only job
// or for a map-reduce job
if ((parseCtx.getConf().getBoolVar(
HiveConf.ConfVars.HIVEMERGEMAPFILES) && (((MapredWork) currTask
- .getWork()).getReducer() == null))
- || (parseCtx.getConf().getBoolVar(
- HiveConf.ConfVars.HIVEMERGEMAPREDFILES) && (((MapredWork) currTask
- .getWork()).getReducer() != null))) {
+ .getWork()).getReducer() == null))
+ || (parseCtx.getConf().getBoolVar(
+ HiveConf.ConfVars.HIVEMERGEMAPREDFILES) && (((MapredWork) currTask
+ .getWork()).getReducer() != null))) {
chDir = true;
}
}
@@ -169,7 +181,7 @@
// Add the extract operator to get the value fields
RowResolver out_rwsch = new RowResolver();
RowResolver interim_rwsch = ctx.getParseCtx().getOpParseCtx().get(fsOp)
- .getRR();
+ .getRR();
Integer pos = Integer.valueOf(0);
for (ColumnInfo colInfo : interim_rwsch.getColumnInfos()) {
String[] info = interim_rwsch.reverseLookup(colInfo.getInternalName());
@@ -180,17 +192,17 @@
Operator extract = OperatorFactory.getAndMakeChild(new ExtractDesc(
new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo,
- Utilities.ReduceField.VALUE.toString(), "", false)),
- new RowSchema(out_rwsch.getColumnInfos()));
+ Utilities.ReduceField.VALUE.toString(), "", false)),
+ new RowSchema(out_rwsch.getColumnInfos()));
TableDesc ts = (TableDesc) fsConf.getTableInfo().clone();
fsConf
- .getTableInfo()
- .getProperties()
- .remove(
+ .getTableInfo()
+ .getProperties()
+ .remove(
org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_PARTITION_COLUMNS);
FileSinkOperator newOutput = (FileSinkOperator) OperatorFactory
- .getAndMakeChild(new FileSinkDesc(finalName, ts, parseCtx.getConf()
+ .getAndMakeChild(new FileSinkDesc(finalName, ts, parseCtx.getConf()
.getBoolVar(HiveConf.ConfVars.COMPRESSRESULT)), fsRS, extract);
cplan.setReducer(extract);
@@ -301,7 +313,7 @@
Operator<? extends Serializable> currTopOp = ctx.getCurrTopOp();
String currAliasId = ctx.getCurrAliasId();
HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = ctx
- .getOpTaskMap();
+ .getOpTaskMap();
List<Operator<? extends Serializable>> seenOps = ctx.getSeenOps();
List<Task<? extends Serializable>> rootTasks = ctx.getRootTasks();
@@ -332,7 +344,7 @@
// mapTask and currTask should be merged by a join/union operator
// (e.g., GenMRUnion1) which has multiple topOps.
assert mapTask == currTask : "mapTask.id = " + mapTask.getId()
- + "; currTask.id = " + currTask.getId();
+ + "; currTask.id = " + currTask.getId();
}
return dest;
Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (revision 982011)
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (working copy)
@@ -59,16 +59,16 @@
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.FetchWork;
import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
+import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc;
import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
+import org.apache.hadoop.hive.ql.plan.MapredLocalWork.BucketMapJoinContext;
import org.apache.hadoop.hive.ql.plan.MapredWork;
import org.apache.hadoop.hive.ql.plan.PartitionDesc;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.plan.TableScanDesc;
-import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc;
-import org.apache.hadoop.hive.ql.plan.MapredLocalWork.BucketMapJoinContext;
/**
* General utility common functions for the Processor to convert operator into
@@ -90,15 +90,15 @@
* processing context
*/
public static void initPlan(ReduceSinkOperator op, GenMRProcContext opProcCtx)
- throws SemanticException {
+ throws SemanticException {
Operator<? extends Serializable> reducer = op.getChildOperators().get(0);
Map<Operator<? extends Serializable>, GenMapRedCtx> mapCurrCtx = opProcCtx
- .getMapCurrCtx();
+ .getMapCurrCtx();
GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0));
Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
MapredWork plan = (MapredWork) currTask.getWork();
HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = opProcCtx
- .getOpTaskMap();
+ .getOpTaskMap();
Operator<? extends Serializable> currTopOp = opProcCtx.getCurrTopOp();
opTaskMap.put(reducer, currTask);
@@ -148,9 +148,9 @@
public static void initMapJoinPlan(Operator<? extends Serializable> op,
GenMRProcContext opProcCtx, boolean readInputMapJoin,
boolean readInputUnion, boolean setReducer, int pos, boolean createLocalPlan)
- throws SemanticException {
+ throws SemanticException {
Map<Operator<? extends Serializable>, GenMapRedCtx> mapCurrCtx = opProcCtx
- .getMapCurrCtx();
+ .getMapCurrCtx();
assert (((pos == -1) && (readInputMapJoin)) || (pos != -1));
int parentPos = (pos == -1) ? 0 : pos;
GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(
@@ -158,7 +158,7 @@
Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
MapredWork plan = (MapredWork) currTask.getWork();
HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = opProcCtx
- .getOpTaskMap();
+ .getOpTaskMap();
Operator<? extends Serializable> currTopOp = opProcCtx.getCurrTopOp();
// The mapjoin has already been encountered. Some context must be stored
@@ -171,7 +171,7 @@
if (setReducer) {
Operator<? extends Serializable> reducer = op.getChildOperators()
- .get(0);
+ .get(0);
plan.setReducer(reducer);
opTaskMap.put(reducer, currTask);
if (reducer.getClass() == JoinOperator.class) {
@@ -292,12 +292,12 @@
GenMRProcContext opProcCtx) throws SemanticException {
Operator<? extends Serializable> reducer = op.getChildOperators().get(0);
Map<Operator<? extends Serializable>, GenMapRedCtx> mapCurrCtx = opProcCtx
- .getMapCurrCtx();
+ .getMapCurrCtx();
GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0));
Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
MapredWork plan = (MapredWork) currTask.getWork();
HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = opProcCtx
- .getOpTaskMap();
+ .getOpTaskMap();
opTaskMap.put(reducer, currTask);
plan.setReducer(reducer);
@@ -469,7 +469,7 @@
* processing context
*/
public static void splitPlan(ReduceSinkOperator op, GenMRProcContext opProcCtx)
- throws SemanticException {
+ throws SemanticException {
// Generate a new task
ParseContext parseCtx = opProcCtx.getParseCtx();
MapredWork cplan = getMapRedWork(parseCtx.getConf());
@@ -484,7 +484,7 @@
cplan.setNumReduceTasks(new Integer(desc.getNumReducers()));
HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = opProcCtx
- .getOpTaskMap();
+ .getOpTaskMap();
opTaskMap.put(reducer, redTask);
Task<? extends Serializable> currTask = opProcCtx.getCurrTask();
@@ -558,6 +558,14 @@
for (Partition part : parts) {
if (part.getTable().isPartitioned()) {
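+ // When compiling an ANALYZE command with an explicit PARTITION spec,
+ // only the partitions matching that spec become read entities; a null
+ // value in the spec acts as a wildcard (see comparePartSpecs below).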
+ if (parseCtx.getQB().getParseInfo().isAnalyzeCommand()) {
+ if (parseCtx.getQB().getParseInfo().getTableSpec().partSpec != null) {
+ if (!comparePartSpecs(parseCtx.getQB().getParseInfo().getTableSpec().partSpec, part.getSpec())) {
+ continue;
+ }
+ }
+ }
+
inputs.add(new ReadEntity(part));
} else {
inputs.add(new ReadEntity(part.getTable()));
@@ -587,8 +595,9 @@
continue;
}
String path = p.toString();
- if (LOG.isDebugEnabled())
+ if (LOG.isDebugEnabled()) {
LOG.debug("Adding " + path + " of table" + alias_id);
+ }
partDir.add(p);
try {
@@ -616,8 +625,9 @@
}
plan.getPathToAliases().get(path).add(alias_id);
plan.getPathToPartitionInfo().put(path, prtDesc);
- if (LOG.isDebugEnabled())
+ if (LOG.isDebugEnabled()) {
LOG.debug("Information added for path " + path);
+ }
}
assert plan.getAliasToWork().get(alias_id) == null;
@@ -636,10 +646,10 @@
localPlan.getAliasToWork().put(alias_id, topOp);
if (tblDir == null) {
localPlan.getAliasToFetchWork()
- .put(
+ .put(
alias_id,
new FetchWork(FetchWork.convertPathToStringArray(partDir),
- partDesc));
+ partDesc));
} else {
localPlan.getAliasToFetchWork().put(alias_id,
new FetchWork(tblDir.toString(), tblDesc));
@@ -719,7 +729,7 @@
tagToSchema.set(tag, rs.getConf().getValueSerializeInfo());
} else {
List<Operator<? extends Serializable>> children = topOp
- .getChildOperators();
+ .getChildOperators();
if (children != null) {
for (Operator<? extends Serializable> op : children) {
setKeyAndValueDesc(plan, op);
@@ -741,7 +751,7 @@
work.setTagToValueDesc(new ArrayList<TableDesc>());
work.setReducer(null);
work.setHadoopSupportsSplittable(
- conf.getBoolVar(HiveConf.ConfVars.HIVE_COMBINE_INPUT_FORMAT_SUPPORTS_SPLITTABLE));
+ conf.getBoolVar(HiveConf.ConfVars.HIVE_COMBINE_INPUT_FORMAT_SUPPORTS_SPLITTABLE));
return work;
}
@@ -814,7 +824,7 @@
// replace the reduce child with this operator
List<Operator<? extends Serializable>> childOpList = parent
- .getChildOperators();
+ .getChildOperators();
for (int pos = 0; pos < childOpList.size(); pos++) {
if (childOpList.get(pos) == op) {
childOpList.set(pos, fs_op);
@@ -823,7 +833,7 @@
}
List<Operator<? extends Serializable>> parentOpList =
- new ArrayList<Operator<? extends Serializable>>();
+ new ArrayList<Operator<? extends Serializable>>();
parentOpList.add(parent);
fs_op.setParentOperators(parentOpList);
@@ -837,7 +847,7 @@
op.getParentOperators().set(posn, ts_op);
Map<Operator<? extends Serializable>, GenMapRedCtx> mapCurrCtx = opProcCtx
- .getMapCurrCtx();
+ .getMapCurrCtx();
mapCurrCtx.put(ts_op, new GenMapRedCtx(childTask, null, null));
String streamDesc = taskTmpDir;
@@ -962,4 +972,32 @@
// prevent instantiation
}
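+
+ /**
+  * Compares the partition spec given in an ANALYZE command with the spec of
+  * an actual partition. Column names must match positionally; a null value
+  * in the command's spec matches any value for that column.
+  */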
+ private static boolean comparePartSpecs(Map<String, String> partA, LinkedHashMap<String, String> partB) {
+
+ if (partA.size() != partB.size()) {
+ return false;
+ }
+ Iterator<String> iterA = partA.keySet().iterator();
+ Iterator<String> iterB = partB.keySet().iterator();
+
+ while (iterA.hasNext()) {
+ String keyA = iterA.next();
+ String keyB = iterB.next();
+
+ if (!keyA.equals(keyB)) {
+ LOG.error("Partition column mismatch: " + keyA + " vs. " + keyB);
+ return false;
+ }
+
+ if (partA.get(keyA) == null) {
+ continue;
+ }
+
+ if (!partA.get(keyA).equals(partB.get(keyB))) {
+ return false;
+ }
+ }
+
+ return true;
+ }
}
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java (revision 982011)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java (working copy)
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.ql.exec;
+import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
@@ -32,6 +33,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.JobCloseFeedBack.FeedBackType;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
@@ -45,26 +47,28 @@
import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
+import org.apache.hadoop.hive.ql.stats.StatsFactory;
+import org.apache.hadoop.hive.ql.stats.StatsPublisher;
+import org.apache.hadoop.hive.ql.stats.StatsSetupConst;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.Serializer;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.SubStructObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.util.ReflectionUtils;
-
/**
* File Sink operator implementation.
**/
public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
- Serializable {
+Serializable {
protected transient HashMap<String, FSPaths> valToPaths;
protected transient int numDynParts;
@@ -100,6 +104,7 @@
Path[] outPaths;
Path[] finalPaths;
RecordWriter[] outWriters;
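+ // Statistics (currently just the row count) gathered for the rows
+ // written through these paths when stats collection is enabled.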
+ Stat stat;
public FSPaths() {
}
@@ -109,6 +114,7 @@
outPaths = new Path[numFiles];
finalPaths = new Path[numFiles];
outWriters = new RecordWriter[numFiles];
+ stat = new Stat();
}
/**
@@ -169,7 +175,7 @@
throw new HiveException(e);
}
}
- }
+ }
}
private void commit(FileSystem fs) throws HiveException {
@@ -192,10 +198,10 @@
if (outWriters[idx] != null) {
try {
outWriters[idx].close(abort);
- if (delete) {
- fs.delete(outPaths[idx], true);
- }
- updateProgress();
+ if (delete) {
+ fs.delete(outPaths[idx], true);
+ }
+ updateProgress();
} catch (IOException e) {
throw new HiveException(e);
}
@@ -261,11 +267,11 @@
/*
String specPath;
Path tmpPath;
- */
+ */
String taskId;
private boolean filesCreated = false;
- @Override
+ @Override
protected void initializeOp(Configuration hconf) throws HiveException {
try {
this.hconf = hconf;
@@ -324,12 +330,11 @@
if (!bDynParts) {
fsp = new FSPaths(specPath);
- // Create all the files - this is required because empty files need to be created for
- // empty buckets
- // createBucketFiles(fsp);
- valToPaths.put("", fsp); // special entry for non-DP case
+ // Create all the files - this is required because empty files need to be created for
+ // empty buckets
+ // createBucketFiles(fsp);
+ valToPaths.put("", fsp); // special entry for non-DP case
}
-
initializeChildren(hconf);
} catch (HiveException e) {
throw e;
@@ -428,9 +433,9 @@
}
try {
// The reason to keep these instead of using
- // OutputFormat.getRecordWriter() is that
- // getRecordWriter does not give us enough control over the file name that
- // we create.
+ // OutputFormat.getRecordWriter() is that
+ // getRecordWriter does not give us enough control over the file name that
+ // we create.
if (!bDynParts) {
fsp.finalPaths[filesIdx] = HiveFileFormatUtils.getOutputFormatFinalPath(
parent, taskId, jc, hiveOutputFormat, isCompressed, fsp.finalPaths[filesIdx]);
@@ -448,21 +453,21 @@
}
LOG.info("New Final Path: FS " + fsp.finalPaths[filesIdx]);
- if (isNativeTable) {
- try {
- // in recent hadoop versions, use deleteOnExit to clean tmp files.
- autoDelete = ShimLoader.getHadoopShims().fileSystemDeleteOnExit(
- fs, fsp.outPaths[filesIdx]);
- } catch (IOException e) {
- throw new HiveException(e);
- }
- }
+ if (isNativeTable) {
+ try {
+ // in recent hadoop versions, use deleteOnExit to clean tmp files.
+ autoDelete = ShimLoader.getHadoopShims().fileSystemDeleteOnExit(
+ fs, fsp.outPaths[filesIdx]);
+ } catch (IOException e) {
+ throw new HiveException(e);
+ }
+ }
Utilities.copyTableJobPropertiesToConf(conf.getTableInfo(), jc);
// only create bucket files if there are no dynamic partitions;
// buckets of dynamic partitions will be created for each newly created partition
fsp.outWriters[filesIdx] = HiveFileFormatUtils.getHiveRecordWriter(
- jc, conf.getTableInfo(), outputClass, conf, fsp.outPaths[filesIdx]);
+ jc, conf.getTableInfo(), outputClass, conf, fsp.outPaths[filesIdx]);
filesIdx++;
}
assert filesIdx == numFiles;
@@ -545,6 +550,11 @@
recordValue = serializer.serialize(row, subSetOI);
} else {
rowOutWriters = fsp.outWriters;
+
+ if (conf.isGatherStats()) {
+ fsp.stat.increaseNumRows(1);
+ }
+
// use SerDe to serialize r, and write it out
recordValue = serializer.serialize(row, inputObjInspectors[0]);
}
@@ -561,7 +571,7 @@
for (int i = 0; i < partitionEval.length; i++) {
Object o = partitionEval[i].evaluate(row);
keyHashCode = keyHashCode * 31
- + ObjectInspectorUtils.hashCode(o, partitionObjectInspectors[i]);
+ + ObjectInspectorUtils.hashCode(o, partitionObjectInspectors[i]);
}
key.setHashCode(keyHashCode);
int bucketNum = prtner.getBucket(key, null, totalFiles);
@@ -584,6 +594,7 @@
if (dpDir != null) {
FSPaths fsp2 = valToPaths.get(dpDir);
+
if (fsp2 == null) {
// check # of dp
if (valToPaths.size() > maxPartitions) {
@@ -597,6 +608,9 @@
createBucketFiles(fsp2);
valToPaths.put(dpDir, fsp2);
}
+ if (conf.isGatherStats()) {
+ fsp2.stat.increaseNumRows(1);
+ }
rw = fsp2.outWriters;
} else {
rw = fsp.outWriters;
@@ -617,11 +631,17 @@
errMsg.append("Operator ").append(getOperatorId()).append(" (id=").append(id).append("): ");
errMsg.append(counterCode > FATAL_ERR_MSG.length - 1 ?
"fatal error":
- FATAL_ERR_MSG[(int) counterCode]);
+ FATAL_ERR_MSG[(int) counterCode]);
}
@Override
public void closeOp(boolean abort) throws HiveException {
+
+ // Only publish stats if this operator's flag was set to gather stats (in the semantic analyzer).
+ if (conf.isGatherStats()) {
+ publishStats();
+ }
+
if (!bDynParts && !filesCreated) {
createBucketFiles(fsp);
}
@@ -654,7 +674,7 @@
@Override
public void jobClose(Configuration hconf, boolean success, JobCloseFeedBack feedBack)
- throws HiveException {
+ throws HiveException {
try {
if ((conf != null) && isNativeTable) {
String specPath = conf.getDirName();
@@ -711,7 +731,7 @@
* @throws IOException
*/
private void createEmptyBuckets(Configuration hconf, ArrayList<String> paths)
- throws HiveException, IOException {
+ throws HiveException, IOException {
JobConf jc;
if (hconf instanceof JobConf) {
@@ -726,9 +746,9 @@
TableDesc tableInfo = conf.getTableInfo();
try {
Serializer serializer = (Serializer) tableInfo.getDeserializerClass().newInstance();
- serializer.initialize(null, tableInfo.getProperties());
- outputClass = serializer.getSerializedClass();
- hiveOutputFormat = conf.getTableInfo().getOutputFileFormatClass().newInstance();
+ serializer.initialize(null, tableInfo.getProperties());
+ outputClass = serializer.getSerializedClass();
+ hiveOutputFormat = conf.getTableInfo().getOutputFileFormatClass().newInstance();
} catch (SerDeException e) {
throw new HiveException(e);
} catch (InstantiationException e) {
@@ -754,6 +774,30 @@
@Override
public void augmentPlan() {
PlanUtils.configureTableJobPropertiesForStorageHandler(
- getConf().getTableInfo());
+ getConf().getTableInfo());
}
+
+ private void publishStats() {
+ // Initializing a stats publisher
+ StatsPublisher statsPublisher;
+ String statsImplementationClass = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVESTATSDBCLASS);
+ if (StatsFactory.setImplementation(statsImplementationClass, jc)) {
+ statsPublisher = StatsFactory.getStatsPublisher();
+ statsPublisher.connect(hconf);
+
+ for (String fspKey : valToPaths.keySet()) {
+ FSPaths fspValue = valToPaths.get(fspKey);
+ String prefix = conf.getStaticSpec();
+ if (!fspKey.equals("")) {
+ prefix += File.separator;
+ }
+ /* "prefix + fspKey" forms the key of the temporary storage relation. This key
+ is the table name in case of non-partitioned tables, but it is
+ "TableName + PartitionSpec" in case of partitions */
+ statsPublisher.publishStat(prefix + fspKey, StatsSetupConst.ROW_COUNT, Long.toString(fspValue.stat.getNumRows()));
+ }
+
+ statsPublisher.closeConnection();
+ }
+ }
}
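
For clarity on what publishStats() above emits: each entry in valToPaths publishes one row count, keyed by the static partition spec plus, for dynamic partitions, a separator and the DP subdirectory. The sketch below mirrors that key construction; the example values are assumptions, since the real prefix comes from FileSinkDesc.getStaticSpec().

    import java.io.File;

    public class StatsKeySketch {
      // Mirrors the key construction in publishStats(). A separator is only
      // appended for dynamic-partition entries; the "" entry in valToPaths
      // (the non-DP case) publishes under the bare static spec.
      static String statsKey(String staticSpec, String fspKey) {
        String prefix = staticSpec;
        if (!fspKey.equals("")) {
          prefix += File.separator; // platform separator, "/" on Linux
        }
        return prefix + fspKey;
      }

      public static void main(String[] args) {
        // Non-partitioned (or purely static) case.
        System.out.println(statsKey("srcpart/ds=2008-04-08", ""));
        // Dynamic-partition case: one key per DP subdirectory.
        System.out.println(statsKey("srcpart/ds=2008-04-08", "hr=11"));
      }
    }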
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java (revision 982011)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java (working copy)
@@ -31,6 +31,7 @@
import org.apache.hadoop.hive.ql.plan.FunctionWork;
import org.apache.hadoop.hive.ql.plan.MapredWork;
import org.apache.hadoop.hive.ql.plan.MoveWork;
+import org.apache.hadoop.hive.ql.plan.StatsWork;
/**
* TaskFactory implementation.
@@ -67,6 +68,8 @@
ConditionalTask.class));
taskvec.add(new taskTuple<MapredWork>(MapredWork.class,
MapRedTask.class));
+ taskvec.add(new taskTuple<StatsWork>(StatsWork.class,
+ StatsTask.class));
}
private static ThreadLocal<Integer> tid = new ThreadLocal<Integer>() {
@@ -130,7 +133,7 @@
}
makeChild(ret, tasklist);
-
+
return (ret);
}
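
The new tuple simply extends TaskFactory's work-class-to-task-class registry, so a StatsWork node in a plan now resolves to a StatsTask the same way MapredWork resolves to MapRedTask. The following standalone sketch shows that dispatch pattern with illustrative names rather than Hive's private taskTuple type; the real factory also assigns the new task an id and wires up child tasks.

    import java.io.Serializable;
    import java.util.ArrayList;
    import java.util.List;

    public class WorkToTaskSketch {
      static class Tuple {
        final Class<? extends Serializable> workClass;
        final Class<?> taskClass;
        Tuple(Class<? extends Serializable> w, Class<?> t) {
          workClass = w;
          taskClass = t;
        }
      }

      static final List<Tuple> registry = new ArrayList<Tuple>();

      static class DemoWork implements Serializable {
        private static final long serialVersionUID = 1L;
      }

      static class DemoTask {
      }

      // First registered tuple whose work class matches wins.
      static Object taskFor(Serializable work) throws Exception {
        for (Tuple t : registry) {
          if (t.workClass == work.getClass()) {
            return t.taskClass.newInstance();
          }
        }
        throw new IllegalArgumentException("no task registered for " + work.getClass());
      }

      public static void main(String[] args) throws Exception {
        registry.add(new Tuple(DemoWork.class, DemoTask.class));
        System.out.println(taskFor(new DemoWork()).getClass().getSimpleName()); // DemoTask
      }
    }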
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java (revision 982011)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java (working copy)
@@ -19,10 +19,25 @@
package org.apache.hadoop.hive.ql.exec;
import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.TableScanDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
+import org.apache.hadoop.hive.ql.stats.StatsFactory;
+import org.apache.hadoop.hive.ql.stats.StatsPublisher;
+import org.apache.hadoop.hive.ql.stats.StatsSetupConst;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
+import org.apache.hadoop.hive.serde2.objectinspector.StructField;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.mapred.JobConf;
/**
* Table Scan Operator If the data is coming from the map-reduce framework, just
@@ -30,25 +45,112 @@
* read as part of map-reduce framework
**/
public class TableScanOperator extends Operator<TableScanDesc> implements
- Serializable {
+Serializable {
private static final long serialVersionUID = 1L;
+ private transient List