Index: ql/src/test/results/clientnegative/ddltime.q.out
===================================================================
--- ql/src/test/results/clientnegative/ddltime.q.out	(revision 0)
+++ ql/src/test/results/clientnegative/ddltime.q.out	(revision 0)
@@ -0,0 +1,10 @@
+PREHOOK: query: drop table T2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table T2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table T2 like srcpart
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table T2 like srcpart
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@T2
+FAILED: Error in semantic analysis: org.apache.hadoop.hive.ql.parse.SemanticException: HOLD_DDLTIME hint cannot be applied to dynamic partitions or non-existent partitions
Index: ql/src/test/results/clientpositive/ddltime.q.out
===================================================================
--- ql/src/test/results/clientpositive/ddltime.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/ddltime.q.out	(revision 0)
@@ -0,0 +1,254 @@
+PREHOOK: query: create table T1 like src
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table T1 like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@T1
+PREHOOK: query: desc extended T1
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: desc extended T1
+POSTHOOK: type: DESCTABLE
+key string default
+value string default
+
+Detailed Table Information Table(tableName:t1, dbName:default, owner:null, createTime:1277236615, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:file:/data/users/nzhang/work/876/apache-hive/build/ql/test/data/warehouse/t1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE,transient_lastDdlTime=1277236615}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+PREHOOK: query: insert overwrite table T1 select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t1
+POSTHOOK: query: insert overwrite table T1 select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc extended T1
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: desc extended T1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+key string default
+value string default
+
+Detailed Table Information Table(tableName:t1, dbName:default, owner:null, createTime:1277236615, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:file:/data/users/nzhang/work/876/apache-hive/build/ql/test/data/warehouse/t1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE,transient_lastDdlTime=1277236620}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+PREHOOK: query: insert overwrite table T1 select /*+ HOLD_DDLTIME*/ * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t1
+POSTHOOK: query: insert overwrite table T1 select /*+ HOLD_DDLTIME*/ * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc extended T1
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: desc extended T1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+key string default
+value string default
+
+Detailed Table Information Table(tableName:t1, dbName:default, owner:null, createTime:1277236615, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:file:/data/users/nzhang/work/876/apache-hive/build/ql/test/data/warehouse/t1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE,transient_lastDdlTime=1277236620}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+PREHOOK: query: insert overwrite table T1 select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t1
+POSTHOOK: query: insert overwrite table T1 select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc extended T1
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: desc extended T1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+key string default
+value string default
+
+Detailed Table Information Table(tableName:t1, dbName:default, owner:null, createTime:1277236615, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:file:/data/users/nzhang/work/876/apache-hive/build/ql/test/data/warehouse/t1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE,transient_lastDdlTime=1277236629}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+PREHOOK: query: drop table T1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table T1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table if not exists T2 like srcpart
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table if not exists T2 like srcpart
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@T2
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc extended T2
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: desc extended T2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+key string default
+value string default
+ds string
+hr string
+
+Detailed Table Information Table(tableName:t2, dbName:default, owner:null, createTime:1277236629, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:file:/data/users/nzhang/work/876/apache-hive/build/ql/test/data/warehouse/t2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{EXTERNAL=FALSE,transient_lastDdlTime=1277236629}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+PREHOOK: query: insert overwrite table T2 partition (ds = '2010-06-21', hr = '1') select key, value from src where key > 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t2@ds=2010-06-21/hr=1
+POSTHOOK: query: insert overwrite table T2 partition (ds = '2010-06-21', hr = '1') select key, value from src where key > 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t2@ds=2010-06-21/hr=1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc extended T2 partition (ds = '2010-06-21', hr = '1')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: desc extended T2 partition (ds = '2010-06-21', hr = '1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+key string default
+value string default
+ds string
+hr string
+
+Detailed Partition Information Partition(values:[2010-06-21, 1], dbName:default, tableName:t2, createTime:1277236634, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:file:/data/users/nzhang/work/876/apache-hive/build/ql/test/data/warehouse/t2/ds=2010-06-21/hr=1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1277236634})
+PREHOOK: query: insert overwrite table T2 partition (ds = '2010-06-21', hr='1') select /*+ HOLD_DDLTIME */ key, value from src where key > 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t2@ds=2010-06-21/hr=1
+POSTHOOK: query: insert overwrite table T2 partition (ds = '2010-06-21', hr='1') select /*+ HOLD_DDLTIME */ key, value from src where key > 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t2@ds=2010-06-21/hr=1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc extended T2 partition (ds = '2010-06-21', hr = '1')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: desc extended T2 partition (ds = '2010-06-21', hr = '1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+key string default
+value string default
+ds string
+hr string
+
+Detailed Partition Information Partition(values:[2010-06-21, 1], dbName:default, tableName:t2, createTime:1277236634, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:file:/data/users/nzhang/work/876/apache-hive/build/ql/test/data/warehouse/t2/ds=2010-06-21/hr=1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1277236634})
+PREHOOK: query: insert overwrite table T2 partition (ds='2010-06-01', hr='1') select key, value from src where key > 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t2@ds=2010-06-01/hr=1
+POSTHOOK: query: insert overwrite table T2 partition (ds='2010-06-01', hr='1') select key, value from src where key > 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t2@ds=2010-06-01/hr=1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-01,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-01,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc extended T2 partition(ds='2010-06-01', hr='1')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: desc extended T2 partition(ds='2010-06-01', hr='1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-01,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-01,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+key string default
+value string default
+ds string
+hr string
+
+Detailed Partition Information Partition(values:[2010-06-01, 1], dbName:default, tableName:t2, createTime:1277236643, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:file:/data/users/nzhang/work/876/apache-hive/build/ql/test/data/warehouse/t2/ds=2010-06-01/hr=1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1277236643})
+PREHOOK: query: drop table T2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table T2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-01,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-01,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-06-21,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
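
Note: the golden output above encodes the feature's contract in the transient_lastDdlTime parameter: it moves from 1277236615 to 1277236620 after the plain INSERT OVERWRITE, stays at 1277236620 across the HOLD_DDLTIME insert, and moves again to 1277236629 once the hint is dropped. A hedged sketch of the same check against the metastore (Hive.get, getTable, and getParameters exist in this era's org.apache.hadoop.hive.ql.metadata API; the surrounding wiring, including how the hinted query is executed, is a placeholder, not part of this patch):

    Hive db = Hive.get(conf);
    String before = db.getTable("default", "t1").getParameters()
        .get("transient_lastDdlTime");
    // run: insert overwrite table T1 select /*+ HOLD_DDLTIME*/ * from src
    String after = db.getTable("default", "t1").getParameters()
        .get("transient_lastDdlTime");
    assert before.equals(after); // 1277236620 in the captured run above
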
Index: ql/src/test/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
===================================================================
--- ql/src/test/org/apache/hadoop/hive/ql/history/TestHiveHistory.java	(revision 956741)
+++ ql/src/test/org/apache/hadoop/hive/ql/history/TestHiveHistory.java	(working copy)
@@ -99,7 +99,7 @@
         db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             IgnoreKeyTextOutputFormat.class);
-        db.loadTable(hadoopDataFile[i], src, false, null);
+        db.loadTable(hadoopDataFile[i], src, false, null, false);
         i++;
       }
Index: ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
===================================================================
--- ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java	(revision 956741)
+++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java	(working copy)
@@ -118,7 +118,7 @@
         db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             IgnoreKeyTextOutputFormat.class);
-        db.loadTable(hadoopDataFile[i], src, false, null);
+        db.loadTable(hadoopDataFile[i], src, false, null, false);
         i++;
       }
@@ -130,6 +130,7 @@
   MapredWork mr;
 
+  @Override
   protected void setUp() {
     mr = PlanUtils.getMapRedWork();
   }
Index: ql/src/test/queries/clientnegative/ddltime.q
===================================================================
--- ql/src/test/queries/clientnegative/ddltime.q	(revision 0)
+++ ql/src/test/queries/clientnegative/ddltime.q	(revision 0)
@@ -0,0 +1,6 @@
+drop table T2;
+create table T2 like srcpart;
+
+insert overwrite table T2 partition (ds = '2010-06-21', hr='1') select /*+ HOLD_DDLTIME */ key, value from src where key > 10;
+
+drop table T2;
Index: ql/src/test/queries/clientpositive/ddltime.q
===================================================================
--- ql/src/test/queries/clientpositive/ddltime.q	(revision 0)
+++ ql/src/test/queries/clientpositive/ddltime.q	(revision 0)
@@ -0,0 +1,45 @@
+create table T1 like src;
+
+desc extended T1;
+
+!sleep 1;
+insert overwrite table T1 select * from src;
+
+desc extended T1;
+
+!sleep 1;
+
+insert overwrite table T1 select /*+ HOLD_DDLTIME*/ * from src;
+
+desc extended T1;
+
+!sleep 1;
+
+insert overwrite table T1 select * from src;
+
+desc extended T1;
+
+drop table T1;
+
+create table if not exists T2 like srcpart;
+desc extended T2;
+
+!sleep 1;
+
+insert overwrite table T2 partition (ds = '2010-06-21', hr = '1') select key, value from src where key > 10;
+
+desc extended T2 partition (ds = '2010-06-21', hr = '1');
+
+!sleep 1;
+
+insert overwrite table T2 partition (ds = '2010-06-21', hr='1') select /*+ HOLD_DDLTIME */ key, value from src where key > 10;
+
+desc extended T2 partition (ds = '2010-06-21', hr = '1');
+
+!sleep 1;
+
+insert overwrite table T2 partition (ds='2010-06-01', hr='1') select key, value from src where key > 10;
+
+desc extended T2 partition(ds='2010-06-01', hr='1');
+
+drop table T2;
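
Note: the two test call sites above show the migration rule for any other caller of Hive.loadTable: the signature gains a trailing holdDDLTime argument, and passing false preserves the pre-patch behavior. A minimal before/after sketch, reusing the variables from the tests:

    // before this patch
    db.loadTable(hadoopDataFile[i], src, false, null);
    // after this patch -- `false` keeps the old behavior of refreshing
    // transient_lastDdlTime on every load
    db.loadTable(hadoopDataFile[i], src, false, null, false);
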
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java	(revision 956741)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java	(working copy)
@@ -537,7 +537,8 @@
    *          The temporary directory.
    */
   public void loadPartition(Path loadPath, String tableName,
-      Map<String, String> partSpec, boolean replace, Path tmpDirPath)
+      Map<String, String> partSpec, boolean replace, Path tmpDirPath,
+      boolean holdDDLTime)
       throws HiveException {
     Table tbl = getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
     try {
@@ -575,7 +576,9 @@
       }
 
       // recreate the partition if it existed before
-      part = getPartition(tbl, partSpec, true);
+      if (!holdDDLTime) {
+        part = getPartition(tbl, partSpec, true);
+      }
     } catch (IOException e) {
       LOG.error(StringUtils.stringifyException(e));
       throw new HiveException(e);
@@ -601,7 +604,7 @@
    */
  public ArrayList<LinkedHashMap<String, String>> loadDynamicPartitions(Path loadPath,
      String tableName, Map<String, String> partSpec, boolean replace,
-      Path tmpDirPath, int numDP)
+      Path tmpDirPath, int numDP, boolean holdDDLTime)
      throws HiveException {

    try {
@@ -633,7 +636,7 @@
        fullPartSpecs.add(fullPartSpec);

        // finally load the partition -- move the file to the final table address
-        loadPartition(partPath, tableName, fullPartSpec, replace, tmpDirPath);
+        loadPartition(partPath, tableName, fullPartSpec, replace, tmpDirPath, holdDDLTime);
        LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec);
      }
      return fullPartSpecs;
@@ -658,7 +661,7 @@
    *          The temporary directory.
    */
   public void loadTable(Path loadPath, String tableName, boolean replace,
-      Path tmpDirPath) throws HiveException {
+      Path tmpDirPath, boolean holdDDLTime) throws HiveException {
     Table tbl = getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
 
     if (replace) {
@@ -666,9 +669,17 @@
     } else {
       tbl.copyFiles(loadPath);
     }
+
+    if (!holdDDLTime) {
+      try {
+        alterTable(tableName, tbl);
+      } catch (InvalidOperationException e) {
+        throw new HiveException(e);
+      }
+    }
   }
 
-  /**
+  /**
    * Creates a partition.
    *
    * @param tbl
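
Note: with the extended API, a caller can re-materialize an existing table or partition without touching its metadata: when holdDDLTime is true, loadPartition skips the getPartition(tbl, partSpec, true) touch and loadTable skips alterTable, so transient_lastDdlTime stays as-is. A hedged usage sketch (the paths and the LinkedHashMap wiring are placeholders, not part of this patch):

    // Reload ds=2010-06-21/hr=1 in place without bumping its DDL time.
    Map<String, String> partSpec = new LinkedHashMap<String, String>();
    partSpec.put("ds", "2010-06-21");
    partSpec.put("hr", "1");
    db.loadPartition(loadPath, "t2", partSpec, /* replace */ true,
        tmpDirPath, /* holdDDLTime */ true);
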
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java	(revision 956741)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java	(working copy)
@@ -33,6 +33,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
@@ -45,7 +46,6 @@
 import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -171,7 +171,8 @@
         if (tbd.getPartitionSpec().size() == 0) {
           dc = new DataContainer(table.getTTable());
           db.loadTable(new Path(tbd.getSourceDir()), tbd.getTable()
-              .getTableName(), tbd.getReplace(), new Path(tbd.getTmpDir()));
+              .getTableName(), tbd.getReplace(), new Path(tbd.getTmpDir()),
+              tbd.getHoldDDLTime());
           if (work.getOutputs() != null) {
             work.getOutputs().add(new WriteEntity(table));
           }
@@ -188,7 +189,8 @@
                 tbd.getPartitionSpec(),
                 tbd.getReplace(),
                 new Path(tbd.getTmpDir()),
-                dpCtx.getNumDPCols());
+                dpCtx.getNumDPCols(),
+                tbd.getHoldDDLTime());
             // for each partition spec, get the partition
             // and put it to WriteEntity for post-exec hook
             for (LinkedHashMap<String, String> partSpec: dp) {
@@ -221,7 +223,8 @@
             dc = null; // reset data container to prevent it being added again.
           } else { // static partitions
             db.loadPartition(new Path(tbd.getSourceDir()), tbd.getTable().getTableName(),
-                tbd.getPartitionSpec(), tbd.getReplace(), new Path(tbd.getTmpDir()));
+                tbd.getPartitionSpec(), tbd.getReplace(), new Path(tbd.getTmpDir()),
+                tbd.getHoldDDLTime());
             Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
             dc = new DataContainer(table.getTTable(), partn.getTPartition());
             // add this partition to post-execution hook
Index: ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java	(revision 956741)
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java	(working copy)
@@ -32,12 +32,14 @@
   private boolean replace;
   private String tmpDir;
   private DynamicPartitionCtx dpCtx;
+  private boolean holdDDLTime;
 
   // TODO: the below seems like they should just be combined into partitionDesc
   private org.apache.hadoop.hive.ql.plan.TableDesc table;
   private Map<String, String> partitionSpec; // NOTE: this partitionSpec has to be ordered map
 
   public LoadTableDesc() {
+    this.holdDDLTime = false;
   }
 
   public LoadTableDesc(final String sourceDir, final String tmpDir,
@@ -72,8 +74,17 @@
     this.table = table;
     this.partitionSpec = partitionSpec;
     this.replace = replace;
+    this.holdDDLTime = false;
   }
 
+  public void setHoldDDLTime(boolean ddlTime) {
+    holdDDLTime = ddlTime;
+  }
+
+  public boolean getHoldDDLTime() {
+    return holdDDLTime;
+  }
+
   @Explain(displayName = "tmp directory", normalExplain = false)
   public String getTmpDir() {
     return tmpDir;
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/ErrorMsg.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ErrorMsg.java	(revision 956741)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ErrorMsg.java	(working copy)
@@ -158,6 +158,8 @@
   UNARCHIVE_ON_MULI_PARTS("ARCHIVE can only be run on a single partition"),
   ARCHIVE_ON_TABLE("ARCHIVE can only be run on partitions"),
   RESERVED_PART_VAL("Partition value contains a reserved substring"),
+  HOLD_DDLTIME_ON_NONEXIST_PARTITIONS("HOLD_DDLTIME hint cannot be applied to dynamic "
+      + "partitions or non-existent partitions"),
   ;
 
   private String mesg;
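
Note: the plan-time wiring is a single boolean carried on LoadTableDesc; MoveTask reads it back through getHoldDDLTime() when it calls into Hive.loadTable, loadPartition, or loadDynamicPartitions. A sketch using only the members added in this patch (the constructor arguments are placeholders standing in for whatever SemanticAnalyzer passes):

    LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, extTmpUri,
        tableDesc, partSpec); // the 4-arg form used by SemanticAnalyzer below
    ltd.setHoldDDLTime(true); // defaults to false in every constructor
    assert ltd.getHoldDDLTime();
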
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g	(revision 956741)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g	(working copy)
@@ -149,6 +149,7 @@
 TOK_HINT;
 TOK_MAPJOIN;
 TOK_STREAMTABLE;
+TOK_HOLD_DDLTIME;
 TOK_HINTARGLIST;
 TOK_USERSCRIPTCOLNAMES;
 TOK_USERSCRIPTCOLSCHEMA;
@@ -923,7 +924,7 @@
 @init { msgs.push("hint item"); }
 @after { msgs.pop(); }
     :
-    hintName (LPAREN hintArgs RPAREN)? -> ^(TOK_HINT hintName hintArgs)
+    hintName (LPAREN hintArgs RPAREN)? -> ^(TOK_HINT hintName hintArgs?)
     ;
 
 hintName
@@ -932,6 +933,7 @@
     :
     KW_MAPJOIN -> TOK_MAPJOIN
     | KW_STREAMTABLE -> TOK_STREAMTABLE
+    | KW_HOLD_DDLTIME -> TOK_HOLD_DDLTIME
     ;
 
 hintArgs
@@ -1601,6 +1603,7 @@
 KW_END: 'END';
 KW_MAPJOIN: 'MAPJOIN';
 KW_STREAMTABLE: 'STREAMTABLE';
+KW_HOLD_DDLTIME: 'HOLD_DDLTIME';
 KW_CLUSTERSTATUS: 'CLUSTERSTATUS';
 KW_UTC: 'UTC';
 KW_UTCTIMESTAMP: 'UTC_TMESTAMP';
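
Note: the hintItem rewrite changes from ^(TOK_HINT hintName hintArgs) to ^(TOK_HINT hintName hintArgs?) because HOLD_DDLTIME, unlike MAPJOIN or STREAMTABLE, takes no argument list, so the rewrite must tolerate a missing hintArgs subtree. A hedged parse check (ParseDriver.parse(String) is this era's parser entry point; exception handling is elided):

    ParseDriver pd = new ParseDriver();
    ASTNode tree = pd.parse(
        "insert overwrite table T1 select /*+ HOLD_DDLTIME */ * from src");
    // the TOK_HINT node now carries a TOK_HOLD_DDLTIME child and no
    // TOK_HINTARGLIST, which checkHoldDDLTime() below relies on
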
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java	(revision 956741)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java	(working copy)
@@ -31,9 +31,9 @@
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.Map.Entry;
 import java.util.regex.Pattern;
 import java.util.regex.PatternSyntaxException;
 
@@ -88,6 +88,7 @@
 import org.apache.hadoop.hive.ql.optimizer.GenMRFileSink1;
 import org.apache.hadoop.hive.ql.optimizer.GenMROperator;
 import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext;
+import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
 import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink1;
 import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink2;
 import org.apache.hadoop.hive.ql.optimizer.GenMRRedSink3;
@@ -97,7 +98,6 @@
 import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils;
 import org.apache.hadoop.hive.ql.optimizer.MapJoinFactory;
 import org.apache.hadoop.hive.ql.optimizer.Optimizer;
-import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
 import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext;
 import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalOptimizer;
 import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
@@ -117,6 +117,7 @@
 import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.FilterDesc;
+import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc;
 import org.apache.hadoop.hive.ql.plan.ForwardDesc;
 import org.apache.hadoop.hive.ql.plan.GroupByDesc;
 import org.apache.hadoop.hive.ql.plan.JoinCondDesc;
@@ -137,12 +138,11 @@
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.ql.plan.UDTFDesc;
 import org.apache.hadoop.hive.ql.plan.UnionDesc;
-import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFHash;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
 import org.apache.hadoop.hive.serde.Constants;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
@@ -150,9 +150,9 @@
 import org.apache.hadoop.hive.serde2.SerDeUtils;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
@@ -3146,6 +3146,25 @@
     return input;
   }
 
+  /**
+   * Check for HOLD_DDLTIME hint.
+   * @param qb
+   * @return true if HOLD_DDLTIME is set, false otherwise.
+   */
+  private boolean checkHoldDDLTime(QB qb) {
+    ASTNode hints = qb.getParseInfo().getHints();
+    if (hints == null) {
+      return false;
+    }
+    for (int pos = 0; pos < hints.getChildCount(); pos++) {
+      ASTNode hint = (ASTNode) hints.getChild(pos);
+      if (((ASTNode) hint.getChild(0)).getToken().getType() == HiveParser.TOK_HOLD_DDLTIME) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   @SuppressWarnings("nls")
   private Operator genFileSinkPlan(String dest, QB qb, Operator input)
       throws SemanticException {
@@ -3163,6 +3182,7 @@
     SortBucketRSCtx rsCtx = new SortBucketRSCtx();
     DynamicPartitionCtx dpCtx = null;
     LoadTableDesc ltd = null;
+    boolean holdDDLTime = checkHoldDDLTime(qb);
 
     switch (dest_type.intValue()) {
     case QBMetaData.DEST_TABLE: {
@@ -3177,6 +3197,11 @@
       if (partSpec == null || partSpec.size() == 0) { // user did NOT specify partition
         throw new SemanticException(ErrorMsg.NEED_PARTITION_ERROR.getMsg());
       }
+      // the HOLD_DDLTIME hint should not be used with dynamic partitions since the
+      // newly generated partitions should always update their DDLTIME
+      if (holdDDLTime) {
+        throw new SemanticException(ErrorMsg.HOLD_DDLTIME_ON_NONEXIST_PARTITIONS.getMsg());
+      }
       dpCtx = qbm.getDPCtx(dest);
       if (dpCtx == null) {
         validatePartSpec(dest_tab, partSpec);
@@ -3234,6 +3259,10 @@
       if (!isNonNativeTable) {
         ltd = new LoadTableDesc(queryTmpdir, ctx.getExternalTmpFileURI(dest_path.toUri()),
             table_desc, dpCtx);
+        if (holdDDLTime) {
+          LOG.info("this query will not update transient_lastDdlTime!");
+          ltd.setHoldDDLTime(true);
+        }
         loadTableWork.add(ltd);
       }
 
@@ -3267,9 +3296,20 @@
       currentTableId = destTableId;
       destTableId++;
 
-      ltd = new LoadTableDesc(queryTmpdir, ctx
-          .getExternalTmpFileURI(dest_path.toUri()), table_desc, dest_part
-          .getSpec());
+      ltd = new LoadTableDesc(queryTmpdir, ctx.getExternalTmpFileURI(dest_path.toUri()),
+          table_desc, dest_part.getSpec());
+      if (holdDDLTime) {
+        try {
+          Partition part = db.getPartition(dest_tab, dest_part.getSpec(), false);
+          if (part == null) {
+            throw new SemanticException(ErrorMsg.HOLD_DDLTIME_ON_NONEXIST_PARTITIONS.getMsg());
+          }
+        } catch (HiveException e) {
+          throw new SemanticException(e);
+        }
+        LOG.info("this query will not update transient_lastDdlTime!");
+        ltd.setHoldDDLTime(true);
+      }
      loadTableWork.add(ltd);
      if (!outputs.add(new WriteEntity(dest_part))) {
        throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES
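
Note: the patch is truncated above, mid-hunk. As the two surviving guards show, HOLD_DDLTIME is rejected during semantic analysis in exactly two cases: a dynamic-partition insert, and a static partition spec that does not exist yet (the clientnegative test). A hedged end-to-end illustration (Driver(HiveConf) and run(String) are this era's entry points; treat the exact signatures as assumptions):

    Driver driver = new Driver(conf);
    // rejected: the target partition has not been created yet, so there is
    // no DDL time to hold -- fails with HOLD_DDLTIME_ON_NONEXIST_PARTITIONS
    driver.run("insert overwrite table T2 partition (ds = '2010-06-21', hr='1') "
        + "select /*+ HOLD_DDLTIME */ key, value from src where key > 10");
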