Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(revision 1068586)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(working copy)
@@ -1181,27 +1181,36 @@
             "Unable to add partition because table or database do not exist");
       }

-      String partLocationStr = part.getSd().getLocation();
+      String partLocationStr = null;
+      if (part.getSd() != null) {
+        partLocationStr = part.getSd().getLocation();
+      }
       if (partLocationStr == null || partLocationStr.isEmpty()) {
-        // set default location if not specified
-        partLocation = new Path(tbl.getSd().getLocation(), Warehouse
+        // set default location if not specified and this is
+        // a physical table partition (not a view)
+        if (tbl.getSd().getLocation() != null) {
+          partLocation = new Path(tbl.getSd().getLocation(), Warehouse
             .makePartName(tbl.getPartitionKeys(), part.getValues()));
+        }
       } else {
         partLocation = wh.getDnsPath(new Path(partLocationStr));
       }
-      part.getSd().setLocation(partLocation.toString());
+      if (partLocation != null) {
+        part.getSd().setLocation(partLocation.toString());

-      // Check to see if the directory already exists before calling mkdirs()
-      // because if the file system is read-only, mkdirs will throw an
-      // exception even if the directory already exists.
-      if (!wh.isDir(partLocation)) {
-        if (!wh.mkdirs(partLocation)) {
-          throw new MetaException(partLocation +
+        // Check to see if the directory already exists before calling
+        // mkdirs() because if the file system is read-only, mkdirs will
+        // throw an exception even if the directory already exists.
+        if (!wh.isDir(partLocation)) {
+          if (!wh.mkdirs(partLocation)) {
+            throw new MetaException(partLocation +
              " is not a directory or unable to create one");
+          }
+          madeDir = true;
         }
-        madeDir = true;
       }

       // set create time
@@ -1275,14 +1284,13 @@
         if (isArchived) {
           archiveParentDir = MetaStoreUtils.getOriginalLocation(part);
         }
-        if (part.getSd() == null || part.getSd().getLocation() == null) {
-          throw new MetaException("Partition metadata is corrupted");
-        }
         if (!ms.dropPartition(db_name, tbl_name, part_vals)) {
           throw new MetaException("Unable to drop partition");
         }
         success = ms.commitTransaction();
-        partPath = new Path(part.getSd().getLocation());
+        if ((part.getSd() != null) && (part.getSd().getLocation() != null)) {
+          partPath = new Path(part.getSd().getLocation());
+        }
         tbl = get_table(db_name, tbl_name);
       } finally {
         if (!success) {
Index: ql/src/test/results/clientnegative/alter_view_failure2.q.out
===================================================================
--- ql/src/test/results/clientnegative/alter_view_failure2.q.out	(revision 0)
+++ ql/src/test/results/clientnegative/alter_view_failure2.q.out	(revision 0)
@@ -0,0 +1,23 @@
+PREHOOK: query: DROP VIEW xxx4
+PREHOOK: type: DROPVIEW
+POSTHOOK: query: DROP VIEW xxx4
+POSTHOOK: type: DROPVIEW
+PREHOOK: query: CREATE VIEW xxx4
+PARTITIONED ON (value)
+AS
+SELECT * FROM src
+PREHOOK: type: CREATEVIEW
+PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-01-27_13-24-33_030_8341647874668775602/-mr-10000
+POSTHOOK: query: CREATE VIEW xxx4
+PARTITIONED ON (value)
+AS
+SELECT * FROM src
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Output: default@xxx4
+POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-01-27_13-24-33_030_8341647874668775602/-mr-10000
+PREHOOK: query: -- should
fail: need to use ALTER VIEW, not ALTER TABLE +ALTER TABLE xxx4 ADD PARTITION (value='val_86') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@xxx4 +FAILED: Error in metadata: Cannot alter a view with ALTER TABLE +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask Index: ql/src/test/results/clientnegative/create_view_failure7.q.out =================================================================== --- ql/src/test/results/clientnegative/create_view_failure7.q.out (revision 0) +++ ql/src/test/results/clientnegative/create_view_failure7.q.out (revision 0) @@ -0,0 +1,5 @@ +PREHOOK: query: DROP VIEW xxx16 +PREHOOK: type: DROPVIEW +POSTHOOK: query: DROP VIEW xxx16 +POSTHOOK: type: DROPVIEW +FAILED: Error in semantic analysis: At least one non-partitioning column must be present in view Index: ql/src/test/results/clientnegative/alter_view_failure6.q.out =================================================================== --- ql/src/test/results/clientnegative/alter_view_failure6.q.out (revision 0) +++ ql/src/test/results/clientnegative/alter_view_failure6.q.out (revision 0) @@ -0,0 +1,19 @@ +PREHOOK: query: DROP VIEW xxx7 +PREHOOK: type: DROPVIEW +POSTHOOK: query: DROP VIEW xxx7 +POSTHOOK: type: DROPVIEW +PREHOOK: query: CREATE VIEW xxx7 +PARTITIONED ON (key) +AS +SELECT hr,key FROM srcpart +PREHOOK: type: CREATEVIEW +PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-08_18-38-11_186_2914437704768475613/-mr-10000 +POSTHOOK: query: CREATE VIEW xxx7 +PARTITIONED ON (key) +AS +SELECT hr,key FROM srcpart +POSTHOOK: type: CREATEVIEW +POSTHOOK: Output: default@xxx7 +POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-08_18-38-11_186_2914437704768475613/-mr-10000 +FAILED: Error in semantic analysis: No Partition Predicate Found for Alias "xxx7:srcpart" Table "srcpart" +FAILED: Error in semantic analysis: The query does not reference any valid partition. 
To run this query, set hive.mapred.mode=nonstrict Index: ql/src/test/results/clientnegative/alter_view_failure.q.out =================================================================== --- ql/src/test/results/clientnegative/alter_view_failure.q.out (revision 1068586) +++ ql/src/test/results/clientnegative/alter_view_failure.q.out (working copy) @@ -4,14 +4,14 @@ POSTHOOK: type: DROPVIEW PREHOOK: query: CREATE VIEW xxx3 AS SELECT * FROM src PREHOOK: type: CREATEVIEW -PREHOOK: Output: file:/tmp/njain/hive_2010-08-16_23-08-10_293_51772665808484032/-mr-10000 +PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-03_16-14-26_185_7721791068067207339/-mr-10000 POSTHOOK: query: CREATE VIEW xxx3 AS SELECT * FROM src POSTHOOK: type: CREATEVIEW -POSTHOOK: Output: file:/tmp/njain/hive_2010-08-16_23-08-10_293_51772665808484032/-mr-10000 POSTHOOK: Output: default@xxx3 +POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-03_16-14-26_185_7721791068067207339/-mr-10000 PREHOOK: query: ALTER TABLE xxx3 REPLACE COLUMNS (xyz int) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@xxx3 PREHOOK: Output: default@xxx3 -FAILED: Error in metadata: Cannot use this form of ALTER TABLE on a view +FAILED: Error in metadata: Cannot alter a view with ALTER TABLE FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask Index: ql/src/test/results/clientnegative/create_view_failure6.q.out =================================================================== --- ql/src/test/results/clientnegative/create_view_failure6.q.out (revision 0) +++ ql/src/test/results/clientnegative/create_view_failure6.q.out (revision 0) @@ -0,0 +1,5 @@ +PREHOOK: query: DROP VIEW xxx15 +PREHOOK: type: DROPVIEW +POSTHOOK: query: DROP VIEW xxx15 +POSTHOOK: type: DROPVIEW +FAILED: Error in semantic analysis: Rightmost columns in view output do not match PARTITIONED ON clause Index: ql/src/test/results/clientnegative/alter_view_failure5.q.out =================================================================== --- ql/src/test/results/clientnegative/alter_view_failure5.q.out (revision 0) +++ ql/src/test/results/clientnegative/alter_view_failure5.q.out (revision 0) @@ -0,0 +1,23 @@ +PREHOOK: query: DROP VIEW xxx6 +PREHOOK: type: DROPVIEW +POSTHOOK: query: DROP VIEW xxx6 +POSTHOOK: type: DROPVIEW +PREHOOK: query: CREATE VIEW xxx6 +PARTITIONED ON (value) +AS +SELECT * FROM src +PREHOOK: type: CREATEVIEW +PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-01_16-25-12_734_1444856635905240227/-mr-10000 +POSTHOOK: query: CREATE VIEW xxx6 +PARTITIONED ON (value) +AS +SELECT * FROM src +POSTHOOK: type: CREATEVIEW +POSTHOOK: Output: default@xxx6 +POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-01_16-25-12_734_1444856635905240227/-mr-10000 +PREHOOK: query: -- should fail: partition column name does not match +ALTER VIEW xxx6 ADD PARTITION (v='val_86') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@xxx6 +FAILED: Error in metadata: value not found in table's partition spec: {v=val_86} +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask Index: ql/src/test/results/clientnegative/alter_view_failure4.q.out =================================================================== --- ql/src/test/results/clientnegative/alter_view_failure4.q.out (revision 0) +++ ql/src/test/results/clientnegative/alter_view_failure4.q.out (revision 0) @@ 
-0,0 +1,23 @@ +PREHOOK: query: DROP VIEW xxx5 +PREHOOK: type: DROPVIEW +POSTHOOK: query: DROP VIEW xxx5 +POSTHOOK: type: DROPVIEW +PREHOOK: query: CREATE VIEW xxx5 +PARTITIONED ON (value) +AS +SELECT * FROM src +PREHOOK: type: CREATEVIEW +PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-01_13-10-39_711_3030625582468884410/-mr-10000 +POSTHOOK: query: CREATE VIEW xxx5 +PARTITIONED ON (value) +AS +SELECT * FROM src +POSTHOOK: type: CREATEVIEW +POSTHOOK: Output: default@xxx5 +POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-01_13-10-39_711_3030625582468884410/-mr-10000 +PREHOOK: query: -- should fail: LOCATION clause is illegal +ALTER VIEW xxx5 ADD PARTITION (value='val_86') LOCATION '/foo/bar/baz' +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@xxx5 +FAILED: Error in metadata: LOCATION clause illegal for view partition +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask Index: ql/src/test/results/clientnegative/create_view_failure9.q.out =================================================================== --- ql/src/test/results/clientnegative/create_view_failure9.q.out (revision 0) +++ ql/src/test/results/clientnegative/create_view_failure9.q.out (revision 0) @@ -0,0 +1,5 @@ +PREHOOK: query: DROP VIEW xxx18 +PREHOOK: type: DROPVIEW +POSTHOOK: query: DROP VIEW xxx18 +POSTHOOK: type: DROPVIEW +FAILED: Error in semantic analysis: Rightmost columns in view output do not match PARTITIONED ON clause Index: ql/src/test/results/clientnegative/analyze_view.q.out =================================================================== --- ql/src/test/results/clientnegative/analyze_view.q.out (revision 0) +++ ql/src/test/results/clientnegative/analyze_view.q.out (revision 0) @@ -0,0 +1,12 @@ +PREHOOK: query: DROP VIEW av +PREHOOK: type: DROPVIEW +POSTHOOK: query: DROP VIEW av +POSTHOOK: type: DROPVIEW +PREHOOK: query: CREATE VIEW av AS SELECT * FROM src +PREHOOK: type: CREATEVIEW +PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-01_17-50-22_779_44083551773069928/-mr-10000 +POSTHOOK: query: CREATE VIEW av AS SELECT * FROM src +POSTHOOK: type: CREATEVIEW +POSTHOOK: Output: default@av +POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-01_17-50-22_779_44083551773069928/-mr-10000 +FAILED: Error in semantic analysis: ANALYZE is not supported for views Index: ql/src/test/results/clientnegative/alter_view_failure3.q.out =================================================================== --- ql/src/test/results/clientnegative/alter_view_failure3.q.out (revision 0) +++ ql/src/test/results/clientnegative/alter_view_failure3.q.out (revision 0) @@ -0,0 +1,6 @@ +PREHOOK: query: -- should fail: can't use ALTER VIEW on a table +ALTER VIEW srcpart ADD PARTITION (ds='2012-12-31', hr='23') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@srcpart +FAILED: Error in metadata: Cannot alter a base table with ALTER VIEW +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask Index: ql/src/test/results/clientnegative/create_view_failure8.q.out =================================================================== --- ql/src/test/results/clientnegative/create_view_failure8.q.out (revision 0) +++ ql/src/test/results/clientnegative/create_view_failure8.q.out (revision 0) @@ -0,0 +1,5 @@ +PREHOOK: query: DROP VIEW xxx17 +PREHOOK: type: DROPVIEW +POSTHOOK: query: DROP VIEW xxx17 
+POSTHOOK: type: DROPVIEW +FAILED: Error in semantic analysis: Rightmost columns in view output do not match PARTITIONED ON clause Index: ql/src/test/results/clientpositive/create_view_partitioned.q.out =================================================================== --- ql/src/test/results/clientpositive/create_view_partitioned.q.out (revision 0) +++ ql/src/test/results/clientpositive/create_view_partitioned.q.out (revision 0) @@ -0,0 +1,412 @@ +PREHOOK: query: DROP VIEW vp1 +PREHOOK: type: DROPVIEW +POSTHOOK: query: DROP VIEW vp1 +POSTHOOK: type: DROPVIEW +PREHOOK: query: DROP VIEW vp2 +PREHOOK: type: DROPVIEW +POSTHOOK: query: DROP VIEW vp2 +POSTHOOK: type: DROPVIEW +PREHOOK: query: DROP VIEW vp3 +PREHOOK: type: DROPVIEW +POSTHOOK: query: DROP VIEW vp3 +POSTHOOK: type: DROPVIEW +PREHOOK: query: -- test partitioned view definition +-- (underlying table is not actually partitioned) +CREATE VIEW vp1 +PARTITIONED ON (value) +AS +SELECT key, value +FROM src +WHERE key=86 +PREHOOK: type: CREATEVIEW +PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-20_202_459725770522554343/-mr-10000 +POSTHOOK: query: -- test partitioned view definition +-- (underlying table is not actually partitioned) +CREATE VIEW vp1 +PARTITIONED ON (value) +AS +SELECT key, value +FROM src +WHERE key=86 +POSTHOOK: type: CREATEVIEW +POSTHOOK: Output: default@vp1 +POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-20_202_459725770522554343/-mr-10000 +PREHOOK: query: DESCRIBE EXTENDED vp1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE EXTENDED vp1 +POSTHOOK: type: DESCTABLE +key string +value string + +Detailed Table Information Table(tableName:vp1, dbName:default, owner:jsichi, createTime:1297283960, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:value, type:string, comment:null)], parameters:{transient_lastDdlTime=1297283960}, viewOriginalText:SELECT key, value +FROM src +WHERE key=86, viewExpandedText:SELECT `src`.`key`, `src`.`value` +FROM `src` +WHERE `src`.`key`=86, tableType:VIRTUAL_VIEW) +PREHOOK: query: DESCRIBE FORMATTED vp1 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE FORMATTED vp1 +POSTHOOK: type: DESCTABLE +# col_name data_type comment + +key string None + +# Partition Information +# col_name data_type comment + +value string None + +# Detailed Table Information +Database: default +Owner: jsichi +CreateTime: Wed Feb 09 12:39:20 PST 2011 +LastAccessTime: UNKNOWN +Protect Mode: None +Retention: 0 +Table Type: VIRTUAL_VIEW +Table Parameters: + transient_lastDdlTime 1297283960 + +# Storage Information +SerDe Library: null +InputFormat: org.apache.hadoop.mapred.SequenceFileInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] + +# View Information +View Original Text: SELECT key, value +FROM src +WHERE key=86 +View Expanded Text: SELECT `src`.`key`, `src`.`value` +FROM `src` +WHERE `src`.`key`=86 +PREHOOK: query: SELECT * FROM vp1 +PREHOOK: type: QUERY +PREHOOK: 
Input: default@src +PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-20_956_4791033359314261882/-mr-10000 +POSTHOOK: query: SELECT * FROM vp1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-20_956_4791033359314261882/-mr-10000 +86 val_86 +PREHOOK: query: SELECT key FROM vp1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-28_000_2791859544795907188/-mr-10000 +POSTHOOK: query: SELECT key FROM vp1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-28_000_2791859544795907188/-mr-10000 +86 +PREHOOK: query: SELECT value FROM vp1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-34_032_4697733201868315411/-mr-10000 +POSTHOOK: query: SELECT value FROM vp1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-34_032_4697733201868315411/-mr-10000 +val_86 +PREHOOK: query: ALTER VIEW vp1 +ADD PARTITION (value='val_86') PARTITION (value='val_xyz') +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@vp1 +POSTHOOK: query: ALTER VIEW vp1 +ADD PARTITION (value='val_86') PARTITION (value='val_xyz') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@vp1 +POSTHOOK: Output: default@vp1@value=val_86 +POSTHOOK: Output: default@vp1@value=val_xyz +PREHOOK: query: -- should work since we use IF NOT EXISTS +ALTER VIEW vp1 +ADD IF NOT EXISTS PARTITION (value='val_xyz') +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@vp1 +PREHOOK: Output: default@vp1@value=val_xyz +POSTHOOK: query: -- should work since we use IF NOT EXISTS +ALTER VIEW vp1 +ADD IF NOT EXISTS PARTITION (value='val_xyz') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@vp1 +POSTHOOK: Output: default@vp1@value=val_xyz +PREHOOK: query: SHOW PARTITIONS vp1 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS vp1 +POSTHOOK: type: SHOWPARTITIONS +value=val_86 +value=val_xyz +PREHOOK: query: SHOW PARTITIONS vp1 PARTITION(value='val_86') +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS vp1 PARTITION(value='val_86') +POSTHOOK: type: SHOWPARTITIONS +value=val_86 +PREHOOK: query: SHOW TABLE EXTENDED LIKE vp1 +PREHOOK: type: SHOW_TABLESTATUS +POSTHOOK: query: SHOW TABLE EXTENDED LIKE vp1 +POSTHOOK: type: SHOW_TABLESTATUS +tableName:vp1 +owner:jsichi +location:null +inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat +outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat +columns:struct columns { string key} +partitioned:true +partitionColumns:struct partition_columns { string value} + +PREHOOK: query: SHOW TABLE EXTENDED LIKE vp1 PARTITION(value='val_86') +PREHOOK: type: SHOW_TABLESTATUS +POSTHOOK: query: SHOW TABLE EXTENDED LIKE vp1 PARTITION(value='val_86') +POSTHOOK: type: SHOW_TABLESTATUS +tableName:vp1 +owner:jsichi +location:null +inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat +outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat +columns:struct columns { string key} +partitioned:true 
+partitionColumns:struct partition_columns { string value} + +PREHOOK: query: ALTER VIEW vp1 +DROP PARTITION (value='val_xyz') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@vp1 +PREHOOK: Output: default@vp1@value=val_xyz +POSTHOOK: query: ALTER VIEW vp1 +DROP PARTITION (value='val_xyz') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@vp1 +POSTHOOK: Output: default@vp1@value=val_xyz +PREHOOK: query: -- should work since we use IF EXISTS +ALTER VIEW vp1 +DROP IF EXISTS PARTITION (value='val_xyz') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@vp1 +POSTHOOK: query: -- should work since we use IF EXISTS +ALTER VIEW vp1 +DROP IF EXISTS PARTITION (value='val_xyz') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@vp1 +PREHOOK: query: SHOW PARTITIONS vp1 +PREHOOK: type: SHOWPARTITIONS +POSTHOOK: query: SHOW PARTITIONS vp1 +POSTHOOK: type: SHOWPARTITIONS +value=val_86 +PREHOOK: query: -- Even though no partition predicate is specified in the next query, +-- the WHERE clause inside of the view should satisfy strict mode. +-- In other words, strict only applies to underlying tables +-- (regardless of whether or not the view is partitioned). +SELECT * FROM vp1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-41_066_3753395059950041338/-mr-10000 +POSTHOOK: query: -- Even though no partition predicate is specified in the next query, +-- the WHERE clause inside of the view should satisfy strict mode. +-- In other words, strict only applies to underlying tables +-- (regardless of whether or not the view is partitioned). +SELECT * FROM vp1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-41_066_3753395059950041338/-mr-10000 +86 val_86 +PREHOOK: query: -- test a partitioned view on top of an underlying partitioned table, +-- but with only a suffix of the partitioning columns +CREATE VIEW vp2 +PARTITIONED ON (hr) +AS SELECT * FROM srcpart WHERE key < 10 +PREHOOK: type: CREATEVIEW +PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-46_912_6988747758977470257/-mr-10000 +POSTHOOK: query: -- test a partitioned view on top of an underlying partitioned table, +-- but with only a suffix of the partitioning columns +CREATE VIEW vp2 +PARTITIONED ON (hr) +AS SELECT * FROM srcpart WHERE key < 10 +POSTHOOK: type: CREATEVIEW +POSTHOOK: Output: default@vp2 +POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-46_912_6988747758977470257/-mr-10000 +PREHOOK: query: DESCRIBE FORMATTED vp2 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE FORMATTED vp2 +POSTHOOK: type: DESCTABLE +# col_name data_type comment + +key string None +value string None +ds string None + +# Partition Information +# col_name data_type comment + +hr string None + +# Detailed Table Information +Database: default +Owner: jsichi +CreateTime: Wed Feb 09 12:39:46 PST 2011 +LastAccessTime: UNKNOWN +Protect Mode: None +Retention: 0 +Table Type: VIRTUAL_VIEW +Table Parameters: + transient_lastDdlTime 1297283986 + +# Storage Information +SerDe Library: null +InputFormat: org.apache.hadoop.mapred.SequenceFileInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] + +# View Information +View 
Original Text: SELECT * FROM srcpart WHERE key < 10 +View Expanded Text: SELECT `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` FROM `srcpart` WHERE `srcpart`.`key` < 10 +PREHOOK: query: ALTER VIEW vp2 ADD PARTITION (hr='11') PARTITION (hr='12') +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@vp2 +POSTHOOK: query: ALTER VIEW vp2 ADD PARTITION (hr='11') PARTITION (hr='12') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@vp2 +POSTHOOK: Output: default@vp2@hr=11 +POSTHOOK: Output: default@vp2@hr=12 +PREHOOK: query: SELECT key FROM vp2 WHERE hr='12' ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-47_562_328778258675461955/-mr-10000 +POSTHOOK: query: SELECT key FROM vp2 WHERE hr='12' ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-47_562_328778258675461955/-mr-10000 +0 +0 +0 +0 +0 +0 +2 +2 +4 +4 +5 +5 +5 +5 +5 +5 +8 +8 +9 +9 +PREHOOK: query: -- test a partitioned view where the PARTITIONED ON clause references +-- an imposed column name +CREATE VIEW vp3(k,v) +PARTITIONED ON (v) +AS +SELECT key, value +FROM src +WHERE key=86 +PREHOOK: type: CREATEVIEW +PREHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-55_502_5144513026720493384/-mr-10000 +POSTHOOK: query: -- test a partitioned view where the PARTITIONED ON clause references +-- an imposed column name +CREATE VIEW vp3(k,v) +PARTITIONED ON (v) +AS +SELECT key, value +FROM src +WHERE key=86 +POSTHOOK: type: CREATEVIEW +POSTHOOK: Output: default@vp3 +POSTHOOK: Output: file:/var/folders/7P/7PeC14kXFIWq0PIYyexGbmKuXUk/-Tmp-/jsichi/hive_2011-02-09_12-39-55_502_5144513026720493384/-mr-10000 +PREHOOK: query: DESCRIBE FORMATTED vp3 +PREHOOK: type: DESCTABLE +POSTHOOK: query: DESCRIBE FORMATTED vp3 +POSTHOOK: type: DESCTABLE +# col_name data_type comment + +k string None + +# Partition Information +# col_name data_type comment + +v string None + +# Detailed Table Information +Database: default +Owner: jsichi +CreateTime: Wed Feb 09 12:39:55 PST 2011 +LastAccessTime: UNKNOWN +Protect Mode: None +Retention: 0 +Table Type: VIRTUAL_VIEW +Table Parameters: + transient_lastDdlTime 1297283995 + +# Storage Information +SerDe Library: null +InputFormat: org.apache.hadoop.mapred.SequenceFileInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] + +# View Information +View Original Text: SELECT key, value +FROM src +WHERE key=86 +View Expanded Text: SELECT `key` AS `k`, `value` AS `v` FROM (SELECT `src`.`key`, `src`.`value` +FROM `src` +WHERE `src`.`key`=86) `vp3` +PREHOOK: query: ALTER VIEW vp3 +ADD PARTITION (v='val_86') +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: 
default@vp3 +POSTHOOK: query: ALTER VIEW vp3 +ADD PARTITION (v='val_86') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@vp3 +POSTHOOK: Output: default@vp3@v=val_86 +PREHOOK: query: DROP VIEW vp1 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@vp1 +PREHOOK: Output: default@vp1 +POSTHOOK: query: DROP VIEW vp1 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@vp1 +POSTHOOK: Output: default@vp1 +PREHOOK: query: DROP VIEW vp2 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@vp2 +PREHOOK: Output: default@vp2 +POSTHOOK: query: DROP VIEW vp2 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@vp2 +POSTHOOK: Output: default@vp2 +PREHOOK: query: DROP VIEW vp3 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@vp3 +PREHOOK: Output: default@vp3 +POSTHOOK: query: DROP VIEW vp3 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@vp3 +POSTHOOK: Output: default@vp3 Index: ql/src/test/queries/clientnegative/analyze_view.q =================================================================== --- ql/src/test/queries/clientnegative/analyze_view.q (revision 0) +++ ql/src/test/queries/clientnegative/analyze_view.q (revision 0) @@ -0,0 +1,6 @@ +DROP VIEW av; + +CREATE VIEW av AS SELECT * FROM src; + +-- should fail: can't analyze a view...yet +ANALYZE TABLE av COMPUTE STATISTICS; Index: ql/src/test/queries/clientnegative/create_view_failure6.q =================================================================== --- ql/src/test/queries/clientnegative/create_view_failure6.q (revision 0) +++ ql/src/test/queries/clientnegative/create_view_failure6.q (revision 0) @@ -0,0 +1,6 @@ +DROP VIEW xxx15; + +-- should fail: baz is not a column +CREATE VIEW xxx15 +PARTITIONED ON (baz) +AS SELECT key FROM src; Index: ql/src/test/queries/clientnegative/alter_view_failure3.q =================================================================== --- ql/src/test/queries/clientnegative/alter_view_failure3.q (revision 0) +++ ql/src/test/queries/clientnegative/alter_view_failure3.q (revision 0) @@ -0,0 +1,2 @@ +-- should fail: can't use ALTER VIEW on a table +ALTER VIEW srcpart ADD PARTITION (ds='2012-12-31', hr='23'); Index: ql/src/test/queries/clientnegative/create_view_failure7.q =================================================================== --- ql/src/test/queries/clientnegative/create_view_failure7.q (revision 0) +++ ql/src/test/queries/clientnegative/create_view_failure7.q (revision 0) @@ -0,0 +1,6 @@ +DROP VIEW xxx16; + +-- should fail: must have at least one non-partitioning column +CREATE VIEW xxx16 +PARTITIONED ON (key) +AS SELECT key FROM src; Index: ql/src/test/queries/clientnegative/alter_view_failure4.q =================================================================== --- ql/src/test/queries/clientnegative/alter_view_failure4.q (revision 0) +++ ql/src/test/queries/clientnegative/alter_view_failure4.q (revision 0) @@ -0,0 +1,8 @@ +DROP VIEW xxx5; +CREATE VIEW xxx5 +PARTITIONED ON (value) +AS +SELECT * FROM src; + +-- should fail: LOCATION clause is illegal +ALTER VIEW xxx5 ADD PARTITION (value='val_86') LOCATION '/foo/bar/baz'; Index: ql/src/test/queries/clientnegative/create_view_failure8.q =================================================================== --- ql/src/test/queries/clientnegative/create_view_failure8.q (revision 0) +++ ql/src/test/queries/clientnegative/create_view_failure8.q (revision 0) @@ -0,0 +1,6 @@ +DROP VIEW xxx17; + +-- should fail: partitioning key must be at end +CREATE VIEW xxx17 +PARTITIONED ON (key) +AS SELECT key,value FROM src; Index: 
ql/src/test/queries/clientnegative/alter_view_failure5.q =================================================================== --- ql/src/test/queries/clientnegative/alter_view_failure5.q (revision 0) +++ ql/src/test/queries/clientnegative/alter_view_failure5.q (revision 0) @@ -0,0 +1,8 @@ +DROP VIEW xxx6; +CREATE VIEW xxx6 +PARTITIONED ON (value) +AS +SELECT * FROM src; + +-- should fail: partition column name does not match +ALTER VIEW xxx6 ADD PARTITION (v='val_86'); Index: ql/src/test/queries/clientnegative/alter_view_failure2.q =================================================================== --- ql/src/test/queries/clientnegative/alter_view_failure2.q (revision 0) +++ ql/src/test/queries/clientnegative/alter_view_failure2.q (revision 0) @@ -0,0 +1,8 @@ +DROP VIEW xxx4; +CREATE VIEW xxx4 +PARTITIONED ON (value) +AS +SELECT * FROM src; + +-- should fail: need to use ALTER VIEW, not ALTER TABLE +ALTER TABLE xxx4 ADD PARTITION (value='val_86'); Index: ql/src/test/queries/clientnegative/create_view_failure9.q =================================================================== --- ql/src/test/queries/clientnegative/create_view_failure9.q (revision 0) +++ ql/src/test/queries/clientnegative/create_view_failure9.q (revision 0) @@ -0,0 +1,6 @@ +DROP VIEW xxx18; + +-- should fail: partitioning columns out of order +CREATE VIEW xxx18 +PARTITIONED ON (value,key) +AS SELECT key+1 as k2,key,value FROM src; Index: ql/src/test/queries/clientnegative/alter_view_failure6.q =================================================================== --- ql/src/test/queries/clientnegative/alter_view_failure6.q (revision 0) +++ ql/src/test/queries/clientnegative/alter_view_failure6.q (revision 0) @@ -0,0 +1,11 @@ +DROP VIEW xxx7; +CREATE VIEW xxx7 +PARTITIONED ON (key) +AS +SELECT hr,key FROM srcpart; + +SET hive.mapred.mode=strict; + +-- strict mode should cause this to fail since view partition +-- predicate does not correspond to an underlying table partition predicate +ALTER VIEW xxx7 ADD PARTITION (key=10); Index: ql/src/test/queries/clientpositive/create_view_partitioned.q =================================================================== --- ql/src/test/queries/clientpositive/create_view_partitioned.q (revision 0) +++ ql/src/test/queries/clientpositive/create_view_partitioned.q (revision 0) @@ -0,0 +1,83 @@ +DROP VIEW vp1; +DROP VIEW vp2; +DROP VIEW vp3; + +-- test partitioned view definition +-- (underlying table is not actually partitioned) +CREATE VIEW vp1 +PARTITIONED ON (value) +AS +SELECT key, value +FROM src +WHERE key=86; +DESCRIBE EXTENDED vp1; +DESCRIBE FORMATTED vp1; + +SELECT * FROM vp1; + +SELECT key FROM vp1; + +SELECT value FROM vp1; + +ALTER VIEW vp1 +ADD PARTITION (value='val_86') PARTITION (value='val_xyz'); + +-- should work since we use IF NOT EXISTS +ALTER VIEW vp1 +ADD IF NOT EXISTS PARTITION (value='val_xyz'); + +SHOW PARTITIONS vp1; + +SHOW PARTITIONS vp1 PARTITION(value='val_86'); + +SHOW TABLE EXTENDED LIKE vp1; + +SHOW TABLE EXTENDED LIKE vp1 PARTITION(value='val_86'); + +ALTER VIEW vp1 +DROP PARTITION (value='val_xyz'); + +SET hive.exec.drop.ignorenonexistent=false; + +-- should work since we use IF EXISTS +ALTER VIEW vp1 +DROP IF EXISTS PARTITION (value='val_xyz'); + +SHOW PARTITIONS vp1; + +SET hive.mapred.mode=strict; + +-- Even though no partition predicate is specified in the next query, +-- the WHERE clause inside of the view should satisfy strict mode. +-- In other words, strict only applies to underlying tables +-- (regardless of whether or not the view is partitioned). 
+SELECT * FROM vp1; + +SET hive.mapred.mode=nonstrict; + +-- test a partitioned view on top of an underlying partitioned table, +-- but with only a suffix of the partitioning columns +CREATE VIEW vp2 +PARTITIONED ON (hr) +AS SELECT * FROM srcpart WHERE key < 10; +DESCRIBE FORMATTED vp2; + +ALTER VIEW vp2 ADD PARTITION (hr='11') PARTITION (hr='12'); +SELECT key FROM vp2 WHERE hr='12' ORDER BY key; + +-- test a partitioned view where the PARTITIONED ON clause references +-- an imposed column name +CREATE VIEW vp3(k,v) +PARTITIONED ON (v) +AS +SELECT key, value +FROM src +WHERE key=86; +DESCRIBE FORMATTED vp3; + +ALTER VIEW vp3 +ADD PARTITION (v='val_86'); + +DROP VIEW vp1; +DROP VIEW vp2; +DROP VIEW vp3; Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (revision 1068586) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (working copy) @@ -95,7 +95,9 @@ public Partition(Table tbl) throws HiveException { org.apache.hadoop.hive.metastore.api.Partition tPart = new org.apache.hadoop.hive.metastore.api.Partition(); - tPart.setSd(tbl.getTTable().getSd()); // TODO: get a copy + if (!tbl.isView()) { + tPart.setSd(tbl.getTTable().getSd()); // TODO: get a copy + } initialize(tbl, tPart); } @@ -134,6 +136,11 @@ tpart.setTableName(tbl.getTableName()); tpart.setValues(pvals); + if (tbl.isView()) { + initialize(tbl, tpart); + return; + } + StorageDescriptor sd = new StorageDescriptor(); try { // replace with THRIFT-138 @@ -172,15 +179,23 @@ this.table = table; this.tPartition = tPartition; + + if (table.isView()) { + return; + } + String partName = ""; - if (table.isPartitioned()) { try { partName = Warehouse.makePartName(table.getPartCols(), tPartition.getValues()); if (tPartition.getSd().getLocation() == null) { - // set default if location is not set - Path partPath = new Path(table.getDataLocation().toString(), partName); - tPartition.getSd().setLocation(partPath.toString()); + // set default if location is not set and this is a physical + // table partition (not a view partition) + if (table.getDataLocation() != null) { + Path partPath = new Path( + table.getDataLocation().toString(), partName); + tPartition.getSd().setLocation(partPath.toString()); + } } } catch (MetaException e) { throw new HiveException("Invalid partition for table " + table.getTableName(), @@ -466,7 +481,11 @@ } public String getLocation() { - return tPartition.getSd().getLocation(); + if (tPartition.getSd() == null) { + return null; + } else { + return tPartition.getSd().getLocation(); + } } public void setLocation(String location) { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 1068586) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (working copy) @@ -829,7 +829,9 @@ Table tbl = db.getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName()); - validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.ADDPARTITION); + validateAlterTableType( + tbl, AlterTableDesc.AlterTableTypes.ADDPARTITION, + addPartitionDesc.getExpectView()); // If the add partition was created with IF NOT EXISTS, then we should // not throw an error if the specified part does exist. 
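The metastore change above and the DDLTask changes that follow share one convention: a view partition is pure metadata, so it never carries a storage descriptor or location, an explicit LOCATION clause must be rejected, and directory creation is skipped when there is no location to create. A minimal sketch of that guard, using simplified stand-in types (TableInfo, PartitionSpec, MetadataException, and resolvePartitionLocation are illustrative placeholders, not the real Hive classes or methods):

import java.util.Map;

// Illustrative stand-ins; the real objects are the Hive metastore Thrift classes.
class TableInfo {
  boolean isView;
  String dataLocation; // null for views
}

class PartitionSpec {
  Map<String, String> values;
  String location; // user-supplied LOCATION clause, or null
}

class MetadataException extends Exception {
  MetadataException(String msg) { super(msg); }
}

public class AddPartitionGuard {
  // Mirrors the intent of the patched add-partition path: views get
  // metadata-only partitions (no location, no mkdirs); tables fall back
  // to <table location>/<partition name> when no LOCATION is given.
  static String resolvePartitionLocation(TableInfo tbl, PartitionSpec spec,
      String defaultPartName) throws MetadataException {
    if (tbl.isView) {
      if (spec.location != null) {
        throw new MetadataException("LOCATION clause illegal for view partition");
      }
      return null; // nothing to create, so the caller skips isDir()/mkdirs()
    }
    if (spec.location != null) {
      return spec.location;
    }
    return tbl.dataLocation + "/" + defaultPartName;
  }
}

Returning null here corresponds to the partLocation == null branch added to HiveMetaStore above: with no directory to make, a view partition can be added even against a read-only warehouse.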
@@ -841,6 +843,9 @@ if (addPartitionDesc.getLocation() == null) { db.createPartition(tbl, addPartitionDesc.getPartSpec()); } else { + if (tbl.isView()) { + throw new HiveException("LOCATION clause illegal for view partition"); + } // set partition path relative to table db.createPartition(tbl, addPartitionDesc.getPartSpec(), new Path(tbl .getPath(), addPartitionDesc.getLocation())); @@ -1388,17 +1393,33 @@ } private void validateAlterTableType( - Table tbl, AlterTableDesc.AlterTableTypes alterType) throws HiveException { + Table tbl, AlterTableDesc.AlterTableTypes alterType) throws HiveException { + validateAlterTableType(tbl, alterType, false); + } + + private void validateAlterTableType( + Table tbl, AlterTableDesc.AlterTableTypes alterType, + boolean expectView) throws HiveException { + if (tbl.isView()) { + if (!expectView) { + throw new HiveException("Cannot alter a view with ALTER TABLE"); + } switch (alterType) { + case ADDPARTITION: + case DROPPARTITION: case ADDPROPS: // allow this form break; default: throw new HiveException( - "Cannot use this form of ALTER TABLE on a view"); + "Cannot use this form of ALTER on a view"); } + } else { + if (expectView) { + throw new HiveException("Cannot alter a base table with ALTER VIEW"); + } } if (tbl.isNonNative()) { @@ -2120,12 +2141,16 @@ String outputFormattCls = null; if (part != null) { if (par != null) { - tblLoc = par.getDataLocation().toString(); + if (par.getLocation() != null) { + tblLoc = par.getDataLocation().toString(); + } inputFormattCls = par.getInputFormatClass().getName(); outputFormattCls = par.getOutputFormatClass().getName(); } } else { - tblLoc = tbl.getDataLocation().toString(); + if (tbl.getPath() != null) { + tblLoc = tbl.getDataLocation().toString(); + } inputFormattCls = tbl.getInputFormatClass().getName(); outputFormattCls = tbl.getOutputFormatClass().getName(); } @@ -2162,16 +2187,23 @@ if (isPartitioned) { if (par == null) { for (Partition curPart : db.getPartitions(tbl)) { - locations.add(new Path(curPart.getTPartition().getSd() - .getLocation())); + if (curPart.getLocation() != null) { + locations.add(new Path(curPart.getLocation())); + } } } else { - locations.add(new Path(par.getTPartition().getSd().getLocation())); + if (par.getLocation() != null) { + locations.add(new Path(par.getLocation())); + } } } else { - locations.add(tablLoc); + if (tablLoc != null) { + locations.add(tablLoc); + } } - writeFileSystemStats(outStream, locations, tablLoc, false, 0); + if (!locations.isEmpty()) { + writeFileSystemStats(outStream, locations, tablLoc, false, 0); + } outStream.write(terminator); } @@ -2501,18 +2533,8 @@ } } - validateAlterTableType(tbl, alterTbl.getOp()); + validateAlterTableType(tbl, alterTbl.getOp(), alterTbl.getExpectView()); - if (tbl.isView()) { - if (!alterTbl.getExpectView()) { - throw new HiveException("Cannot alter a view with ALTER TABLE"); - } - } else { - if (alterTbl.getExpectView()) { - throw new HiveException("Cannot alter a base table with ALTER VIEW"); - } - } - Table oldTbl = tbl.copy(); if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) { @@ -2805,19 +2827,20 @@ // drop table is idempotent } - if (tbl != null) { - if (tbl.isView()) { - if (!dropTbl.getExpectView()) { - throw new HiveException("Cannot drop a view with DROP TABLE"); + if (dropTbl.getPartSpecs() == null) { + // This is a true DROP TABLE + if (tbl != null) { + if (tbl.isView()) { + if (!dropTbl.getExpectView()) { + throw new HiveException("Cannot drop a view with DROP TABLE"); + } + } else { + if 
(dropTbl.getExpectView()) { + throw new HiveException("Cannot drop a base table with DROP VIEW"); + } } - } else { - if (dropTbl.getExpectView()) { - throw new HiveException("Cannot drop a base table with DROP VIEW"); - } } - } - if (dropTbl.getPartSpecs() == null) { if (tbl != null && !tbl.canDrop()) { throw new HiveException("Table " + tbl.getTableName() + " is protected from being dropped"); @@ -2841,6 +2864,13 @@ work.getOutputs().add(new WriteEntity(tbl)); } } else { + // This is actually an ALTER TABLE DROP PARTITION + if (tbl != null) { + validateAlterTableType( + tbl, AlterTableDesc.AlterTableTypes.DROPPARTITION, + dropTbl.getExpectView()); + } + // get all partitions of the table List partitionNames = db.getPartitionNames(db.getCurrentDatabase(), dropTbl.getTableName(), (short) -1); @@ -3217,6 +3247,10 @@ tbl.getTTable().getParameters().putAll(crtView.getTblProps()); } + if (crtView.getPartCols() != null) { + tbl.setPartCols(crtView.getPartCols()); + } + int rc = setGenericTableAttributes(tbl); if (rc != 0) { return rc; Index: ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java (revision 1068586) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java (working copy) @@ -48,13 +48,13 @@ this.expectView = expectView; } - public DropTableDesc(String tableName, List> partSpecs) { + public DropTableDesc(String tableName, List> partSpecs, boolean expectView) { this.tableName = tableName; this.partSpecs = new ArrayList>(partSpecs.size()); for (int i = 0; i < partSpecs.size(); i++) { this.partSpecs.add(new LinkedHashMap(partSpecs.get(i))); } - expectView = false; + this.expectView = expectView; } /** Index: ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java (revision 1068586) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java (working copy) @@ -33,6 +33,7 @@ String dbName; String location; boolean ifNotExists; + boolean expectView; LinkedHashMap partSpec; /** @@ -52,15 +53,19 @@ * partition location, relative to table location. 
* @param ifNotExists * if true, the partition is only added if it doesn't exist + * @param expectView + * true for ALTER VIEW, false for ALTER TABLE */ public AddPartitionDesc(String dbName, String tableName, - Map partSpec, String location, boolean ifNotExists) { + Map partSpec, String location, boolean ifNotExists, + boolean expectView) { super(); this.dbName = dbName; this.tableName = tableName; this.partSpec = new LinkedHashMap(partSpec); this.location = location; this.ifNotExists = ifNotExists; + this.expectView = expectView; } /** @@ -137,4 +142,19 @@ public void setIfNotExists(boolean ifNotExists) { this.ifNotExists = ifNotExists; } + + /** + * @return whether to expect a view being altered + */ + public boolean getExpectView() { + return expectView; + } + + /** + * @param expectView + * set whether to expect a view being altered + */ + public void setExpectView(boolean expectView) { + this.expectView = expectView; + } } Index: ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java (revision 1068586) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java (working copy) @@ -42,7 +42,8 @@ public static enum AlterTableTypes { RENAME, ADDCOLS, REPLACECOLS, ADDPROPS, ADDSERDE, ADDSERDEPROPS, ADDFILEFORMAT, ADDCLUSTERSORTCOLUMN, RENAMECOLUMN, ADDPARTITION, - TOUCH, ARCHIVE, UNARCHIVE, ALTERPROTECTMODE, ALTERPARTITIONPROTECTMODE, ALTERLOCATION, + TOUCH, ARCHIVE, UNARCHIVE, ALTERPROTECTMODE, ALTERPARTITIONPROTECTMODE, + ALTERLOCATION, DROPPARTITION }; public static enum ProtectModeType { Index: ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java (revision 1068586) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java (working copy) @@ -38,6 +38,8 @@ private String expandedText; private List schema; private Map tblProps; + private List partColNames; + private List partCols; private String comment; private boolean ifNotExists; @@ -48,11 +50,13 @@ } public CreateViewDesc(String viewName, List schema, - String comment, Map tblProps, boolean ifNotExists) { + String comment, Map tblProps, + List partColNames, boolean ifNotExists) { this.viewName = viewName; this.schema = schema; this.comment = comment; this.tblProps = tblProps; + this.partColNames = partColNames; this.ifNotExists = ifNotExists; } @@ -96,6 +100,27 @@ this.schema = schema; } + @Explain(displayName = "partition columns") + public List getPartColsString() { + return Utilities.getFieldSchemaString(partCols); + } + + public List getPartCols() { + return partCols; + } + + public void setPartCols(List partCols) { + this.partCols = partCols; + } + + public List getPartColNames() { + return partColNames; + } + + public void setPartColNames(List partColNames) { + this.partColNames = partColNames; + } + @Explain(displayName = "comment") public String getComment() { return comment; Index: ql/src/java/org/apache/hadoop/hive/ql/parse/ErrorMsg.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/ErrorMsg.java (revision 1068586) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ErrorMsg.java (working copy) @@ -138,6 +138,9 @@ VIEW_COL_MISMATCH("The number of columns produced by the SELECT clause does not match the " + "number of column names specified by CREATE VIEW"), 
DML_AGAINST_VIEW("A view cannot be used as target table for LOAD or INSERT"), + ANALYZE_VIEW("ANALYZE is not supported for views"), + VIEW_PARTITION_TOTAL("At least one non-partitioning column must be present in view"), + VIEW_PARTITION_MISMATCH("Rightmost columns in view output do not match PARTITIONED ON clause"), PARTITION_DYN_STA_ORDER("Dynamic partition cannot be the parent of a static partition"), DYNAMIC_PARTITION_DISABLED("Dynamic partition is disabled. Either enable it by setting " + "hive.exec.dynamic.partition=true or specify partition column values"), Index: ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (revision 1068586) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (working copy) @@ -165,6 +165,9 @@ TOK_CREATEVIEW; TOK_DROPVIEW; TOK_ALTERVIEW_PROPERTIES; +TOK_ALTERVIEW_ADDPARTS; +TOK_ALTERVIEW_DROPPARTS; +TOK_VIEWPARTCOLS; TOK_EXPLAIN; TOK_TABLESERIALIZER; TOK_TABLEPROPERTIES; @@ -500,6 +503,10 @@ @init { msgs.push("alter view statement"); } @after { msgs.pop(); } : alterViewSuffixProperties + | alterStatementSuffixAddPartitions + -> ^(TOK_ALTERVIEW_ADDPARTS alterStatementSuffixAddPartitions) + | alterStatementSuffixDropPartitions + -> ^(TOK_ALTERVIEW_DROPPARTS alterStatementSuffixDropPartitions) ; alterIndexStatementSuffix @@ -898,18 +905,26 @@ } @after { msgs.pop(); } : KW_CREATE KW_VIEW ifNotExists? name=Identifier - (LPAREN columnNameCommentList RPAREN)? tableComment? + (LPAREN columnNameCommentList RPAREN)? tableComment? viewPartition? tablePropertiesPrefixed? KW_AS selectStatement -> ^(TOK_CREATEVIEW $name ifNotExists? columnNameCommentList? tableComment? + viewPartition? tablePropertiesPrefixed? 
selectStatement ) ; +viewPartition +@init { msgs.push("view partition specification"); } +@after { msgs.pop(); } + : KW_PARTITIONED KW_ON LPAREN columnNameList RPAREN + -> ^(TOK_VIEWPARTCOLS columnNameList) + ; + dropViewStatement @init { msgs.push("drop view statement"); } @after { msgs.pop(); } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (revision 1068586) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (working copy) @@ -73,6 +73,8 @@ commandType.put(HiveParser.TOK_ALTERINDEX_REBUILD, HiveOperation.ALTERINDEX_REBUILD); commandType.put(HiveParser.TOK_ALTERINDEX_PROPERTIES, HiveOperation.ALTERINDEX_PROPS); commandType.put(HiveParser.TOK_ALTERVIEW_PROPERTIES, HiveOperation.ALTERVIEW_PROPERTIES); + commandType.put(HiveParser.TOK_ALTERVIEW_ADDPARTS, HiveOperation.ALTERTABLE_ADDPARTS); + commandType.put(HiveParser.TOK_ALTERVIEW_DROPPARTS, HiveOperation.ALTERTABLE_DROPPARTS); commandType.put(HiveParser.TOK_QUERY, HiveOperation.QUERY); commandType.put(HiveParser.TOK_LOCKTABLE, HiveOperation.LOCKTABLE); commandType.put(HiveParser.TOK_UNLOCKTABLE, HiveOperation.UNLOCKTABLE); @@ -134,6 +136,8 @@ case HiveParser.TOK_ALTERINDEX_REBUILD: case HiveParser.TOK_ALTERINDEX_PROPERTIES: case HiveParser.TOK_ALTERVIEW_PROPERTIES: + case HiveParser.TOK_ALTERVIEW_ADDPARTS: + case HiveParser.TOK_ALTERVIEW_DROPPARTS: case HiveParser.TOK_SHOWDATABASES: case HiveParser.TOK_SHOWTABLES: case HiveParser.TOK_SHOW_TABLESTATUS: Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (revision 1068586) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (working copy) @@ -792,6 +792,10 @@ } if (tab.isView()) { + if (qb.getParseInfo().isAnalyzeCommand()) { + throw new SemanticException(ErrorMsg.ANALYZE_VIEW.getMsg()); + } + replaceViewReferenceWithDefinition(qb, tab, tab_name, alias); continue; } @@ -6681,6 +6685,52 @@ expandedText = sb.toString(); } + if (createVwDesc.getPartColNames() != null) { + // Make sure all partitioning columns referenced actually + // exist and are in the correct order at the end + // of the list of columns produced by the view. Also move the field + // schema descriptors from derivedSchema to the partitioning key + // descriptor. + List partColNames = createVwDesc.getPartColNames(); + if (partColNames.size() > derivedSchema.size()) { + throw new SemanticException( + ErrorMsg.VIEW_PARTITION_MISMATCH.getMsg()); + } + + // Get the partition columns from the end of derivedSchema. + List partitionColumns = derivedSchema.subList( + derivedSchema.size() - partColNames.size(), + derivedSchema.size()); + + // Verify that the names match the PARTITIONED ON clause. + Iterator colNameIter = partColNames.iterator(); + Iterator schemaIter = partitionColumns.iterator(); + while (colNameIter.hasNext()) { + String colName = colNameIter.next(); + FieldSchema fieldSchema = schemaIter.next(); + if (!fieldSchema.getName().equals(colName)) { + throw new SemanticException( + ErrorMsg.VIEW_PARTITION_MISMATCH.getMsg()); + } + } + + // Boundary case: require at least one non-partitioned column + // for consistency with tables. 
+      if (partColNames.size() == derivedSchema.size()) {
+        throw new SemanticException(
+            ErrorMsg.VIEW_PARTITION_TOTAL.getMsg());
+      }
+
+      // Now make a copy.
+      createVwDesc.setPartCols(
+          new ArrayList<FieldSchema>(partitionColumns));
+
+      // Finally, remove the partition columns from the end of derivedSchema.
+      // (Clearing the subList writes through to the underlying
+      // derivedSchema ArrayList.)
+      partitionColumns.clear();
+    }
+
     createVwDesc.setSchema(derivedSchema);
     createVwDesc.setViewExpandedText(expandedText);
   }
@@ -6688,7 +6738,7 @@
   private List<FieldSchema> convertRowSchemaToViewSchema(RowResolver rr) {
     List<FieldSchema> fieldSchemas = new ArrayList<FieldSchema>();
     for (ColumnInfo colInfo : rr.getColumnInfos()) {
-      if (colInfo.getIsVirtualCol()) {
+      if (colInfo.isHiddenVirtualCol()) {
         continue;
       }
       String colName = rr.reverseLookup(colInfo.getInternalName())[1];
@@ -7163,6 +7213,7 @@
     String comment = null;
     ASTNode selectStmt = null;
     Map<String, String> tblProps = null;
+    List<String> partColNames = null;

     LOG.info("Creating view " + tableName + " position="
         + ast.getCharPositionInLine());
@@ -7185,13 +7236,16 @@
       case HiveParser.TOK_TABLEPROPERTIES:
         tblProps = DDLSemanticAnalyzer.getProps((ASTNode) child.getChild(0));
         break;
+      case HiveParser.TOK_VIEWPARTCOLS:
+        partColNames = getColumnNames((ASTNode) child.getChild(0));
+        break;
       default:
         assert false;
       }
     }

     createVwDesc = new CreateViewDesc(
-        tableName, cols, comment, tblProps, ifNotExists);
+        tableName, cols, comment, tblProps, partColNames, ifNotExists);
     unparseTranslator.enable();
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
         createVwDesc), conf));
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java	(revision 1068586)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java	(working copy)
@@ -33,6 +33,7 @@
 import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
+import java.util.regex.Pattern;

 import org.antlr.runtime.tree.CommonTree;
 import org.antlr.runtime.tree.Tree;
@@ -47,6 +48,7 @@
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
@@ -235,6 +237,16 @@
     case HiveParser.TOK_ALTERVIEW_PROPERTIES:
       analyzeAlterTableProps(ast, true);
       break;
+    case HiveParser.TOK_ALTERVIEW_ADDPARTS:
+      // for ALTER VIEW ADD PARTITION, we wrapped the ADD to discriminate
+      // view from table; unwrap it now
+      analyzeAlterTableAddParts((ASTNode) ast.getChild(0), true);
+      break;
+    case HiveParser.TOK_ALTERVIEW_DROPPARTS:
+      // for ALTER VIEW DROP PARTITION, we wrapped the DROP to discriminate
+      // view from table; unwrap it now
+      analyzeAlterTableDropParts((ASTNode) ast.getChild(0), true);
+      break;
     case HiveParser.TOK_ALTERTABLE_RENAME:
       analyzeAlterTableRename(ast);
       break;
@@ -257,10 +269,10 @@
       analyzeAlterTableRenameCol(ast);
       break;
     case HiveParser.TOK_ALTERTABLE_ADDPARTS:
-      analyzeAlterTableAddParts(ast);
+      analyzeAlterTableAddParts(ast, false);
       break;
     case HiveParser.TOK_ALTERTABLE_DROPPARTS:
-      analyzeAlterTableDropParts(ast);
+      analyzeAlterTableDropParts(ast, false);
       break;
     case HiveParser.TOK_ALTERTABLE_PROPERTIES:
       analyzeAlterTableProps(ast, false);
@@ -1563,11 +1575,14 @@
         alterTblDesc), conf));
   }

-  private void analyzeAlterTableDropParts(ASTNode ast) throws SemanticException {
+  private void analyzeAlterTableDropParts(ASTNode ast, boolean expectView)
+      throws SemanticException {
+
     String tblName = unescapeIdentifier(ast.getChild(0).getText());
     // get table metadata
     List<Map<String, String>> partSpecs = getPartitionSpecs(ast);
-    DropTableDesc dropTblDesc = new DropTableDesc(tblName, partSpecs);
+    DropTableDesc dropTblDesc =
+        new DropTableDesc(tblName, partSpecs, expectView);
     try {
       Table tab = db.getTable(db.getCurrentDatabase(), tblName, false);
@@ -1597,17 +1612,23 @@
    *
    * @param ast
    *          The parsed command tree.
+   *
+   * @param expectView
+   *          True for ALTER VIEW, false for ALTER TABLE.
+   *
    * @throws SemanticException
-   *           Parsin failed
+   *           Parsing failed
    */
-  private void analyzeAlterTableAddParts(CommonTree ast)
+  private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView)
       throws SemanticException {
     String tblName = unescapeIdentifier(ast.getChild(0).getText());
+    boolean isView = false;
     try {
       Table tab = db.getTable(db.getCurrentDatabase(), tblName, false);
       if (tab != null) {
         inputs.add(new ReadEntity(tab));
+        isView = tab.isView();
       }
     } catch (HiveException e) {
       throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
@@ -1622,6 +1643,7 @@
     String currentLocation = null;
     Map<String, String> currentPart = null;
     boolean ifNotExists = false;
+    List<AddPartitionDesc> partitionDescs = new ArrayList<AddPartitionDesc>();
     int numCh = ast.getChildCount();
     for (int num = 1; num < numCh; num++) {
@@ -1635,9 +1657,8 @@
           validatePartitionValues(currentPart);
           AddPartitionDesc addPartitionDesc = new AddPartitionDesc(
               db.getCurrentDatabase(), tblName, currentPart,
-              currentLocation, ifNotExists);
-          rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-              addPartitionDesc), conf));
+              currentLocation, ifNotExists, expectView);
+          partitionDescs.add(addPartitionDesc);
         }
         // create new partition, set values
         currentLocation = null;
@@ -1657,10 +1678,53 @@
       validatePartitionValues(currentPart);
       AddPartitionDesc addPartitionDesc = new AddPartitionDesc(
           db.getCurrentDatabase(), tblName, currentPart,
-          currentLocation, ifNotExists);
+          currentLocation, ifNotExists, expectView);
+      partitionDescs.add(addPartitionDesc);
+    }
+
+    for (AddPartitionDesc addPartitionDesc : partitionDescs) {
       rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-          addPartitionDesc), conf));
+          addPartitionDesc), conf));
     }
+
+    if (isView) {
+      // Compile internal query to capture underlying table partition
+      // dependencies
+      StringBuilder cmd = new StringBuilder();
+      cmd.append("SELECT * FROM ");
+      cmd.append(HiveUtils.unparseIdentifier(tblName));
+      cmd.append(" WHERE ");
+      boolean firstOr = true;
+      for (AddPartitionDesc partitionDesc : partitionDescs) {
+        if (firstOr) {
+          firstOr = false;
+        } else {
+          cmd.append(" OR ");
+        }
+        boolean firstAnd = true;
+        cmd.append("(");
+        for (Map.Entry<String, String> entry
+          : partitionDesc.getPartSpec().entrySet())
+        {
+          if (firstAnd) {
+            firstAnd = false;
+          } else {
+            cmd.append(" AND ");
+          }
+          cmd.append(HiveUtils.unparseIdentifier(entry.getKey()));
+          cmd.append(" = '");
+          cmd.append(HiveUtils.escapeString(entry.getValue()));
+          cmd.append("'");
+        }
+        cmd.append(")");
+      }
+      Driver driver = new Driver(conf);
+      int rc = driver.compile(cmd.toString());
+      if (rc != 0) {
+        throw new SemanticException(ErrorMsg.NO_VALID_PARTN.getMsg());
+      }
+      inputs.addAll(driver.getPlan().getInputs());
+    }
   }

   /**
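The isView block at the end of analyzeAlterTableAddParts never shows the query it compiles, so it helps to see what the generated text looks like for the partition specs used in the tests. Below is a rough standalone mirror of that string-building loop; identifier quoting and HiveUtils.escapeString are replaced by a trivial single-quote escape, so this sketches the shape of the query rather than the exact text Hive produces:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ViewPartitionDependencyQuery {

  // Simplified stand-in for HiveUtils.escapeString.
  static String escape(String s) {
    return s.replace("'", "\\'");
  }

  // Builds "SELECT * FROM <view> WHERE (k1 = 'v1' AND ...) OR (...)",
  // one disjunct per ADD PARTITION spec, following the loop above.
  static String buildDependencyQuery(String viewName,
      List<Map<String, String>> partSpecs) {
    StringBuilder cmd = new StringBuilder();
    cmd.append("SELECT * FROM ").append(viewName).append(" WHERE ");
    boolean firstOr = true;
    for (Map<String, String> spec : partSpecs) {
      if (!firstOr) {
        cmd.append(" OR ");
      }
      firstOr = false;
      cmd.append("(");
      boolean firstAnd = true;
      for (Map.Entry<String, String> e : spec.entrySet()) {
        if (!firstAnd) {
          cmd.append(" AND ");
        }
        firstAnd = false;
        cmd.append(e.getKey()).append(" = '").append(escape(e.getValue())).append("'");
      }
      cmd.append(")");
    }
    return cmd.toString();
  }

  public static void main(String[] args) {
    Map<String, String> p1 = new LinkedHashMap<>();
    p1.put("value", "val_86");
    Map<String, String> p2 = new LinkedHashMap<>();
    p2.put("value", "val_xyz");
    // Prints: SELECT * FROM vp1 WHERE (value = 'val_86') OR (value = 'val_xyz')
    System.out.println(buildDependencyQuery("vp1", Arrays.asList(p1, p2)));
  }
}

Compiling that internal query through the Driver is what pulls the underlying table partitions into the plan's inputs; it is also why alter_view_failure6.q fails under hive.mapred.mode=strict, since the view partition predicate (key=10) yields no predicate on srcpart's ds/hr partition columns.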