Index: metastore/scripts/upgrade/001-HIVE-2795.update_view_partitions.py
===================================================================
--- metastore/scripts/upgrade/001-HIVE-2795.update_view_partitions.py	(revision 1328473)
+++ metastore/scripts/upgrade/001-HIVE-2795.update_view_partitions.py	(working copy)
@@ -1,157 +0,0 @@
-#!/usr/local/bin/python
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# This script, provided with a list of view partitions, drops each partition
-# and adds it back via the metastore Thrift server. This is needed because,
-# prior to HIVE-2795, view partitions were created without storage
-# descriptors, which breaks commands such as DESCRIBE FORMATTED when called
-# on these partitions. Altering a view's partition is not currently supported
-# via the Hive CLI, and it results in an exception when attempted through the
-# metastore Thrift server (due to the storage descriptor being null), so no
-# data will be lost by dropping and adding the partition.
-#
-# WARNING: dropping and adding the partition is non-atomic. The script outputs
-#          each line of the file as it processes it. You should pipe this
-#          output to a log file so that, if the machine fails between dropping
-#          and adding, you know which partition may not have been added. If it
-#          has not, go to the Hive CLI and run the command
-#
-#          ALTER VIEW <view_name> ADD PARTITION (<part_spec>);
-#
-#          where <view_name> is the name of the view, which can be taken
-#          directly from the line in the log, and <part_spec> is the partition
-#          specification, which can be determined from the line in the log.
-#          E.g. if the partition name is col1=a/col2=b/col3=c, <part_spec>
-#          should be col1='a', col2='b', col3='c'
-#
-# NOTE: If any partition name contains characters which are escaped, this
-#       script will not work; this includes ASCII values 1-31, 127 and the
-#       characters " # % ' * / : = ? \ { [ ]

-# Before running this script, first execute the following query against your
-# metastore:
-#
-# SELECT name, tbl_name, part_name
-# FROM
-#   DBS d JOIN TBLS t ON d.db_id = t.db_id
-#   JOIN PARTITIONS p ON t.tbl_id = p.tbl_id
-# WHERE t.tbl_type = "VIRTUAL_VIEW";
-#
-# Place the results of this query in a file. The format of the file should be
-# as follows:
-#
-# db_name<sep>tbl_name<sep>part_name
-#
-# where <sep> represents a column separator (tab by default).
-#
-# Then execute this script, passing in the path to the file you created, as
-# well as the metastore host, port, and timeout, and the separator used in
-# the file, if they differ from the defaults.
-
-# To run this script you need the Thrift Python library, as well as Hive's
-# metastore Python library, in your PYTHONPATH. Hive's metastore Python
-# library can be found in trunk/build/dist/lib/py/
-
-from optparse import OptionGroup
-from optparse import OptionParser
-
-from thrift import Thrift
-from thrift.transport import TSocket
-from thrift.transport import TTransport
-from thrift.protocol import TBinaryProtocol
-
-from hive_metastore import ThriftHiveMetastore
-
-# Parse args
-parser = OptionParser()
-
-mandatory_options = OptionGroup(parser, "Mandatory Settings",
-                                "These must be set, they have no defaults")
-
-mandatory_options.add_option("--file", action="store", type="string", dest="file",
-                             help="file containing the list of view partitions " +
-                                  "stored as db_name<sep>table_name<sep>part_name")
-
-parser.add_option_group(mandatory_options)
-
-other_options = OptionGroup(parser, "Other Options",
-                            "These options all have default values")
-
-other_options.add_option("--host", action="store", type="string", dest="host",
-                         default="localhost",
-                         help="hostname of metastore server, " +
-                              "the default is localhost")
-other_options.add_option("--port", action="store", type="string", dest="port",
-                         default="9083",
-                         help="port for metastore server, the default is 9083")
-other_options.add_option("--timeout", action="store", type="string", dest="timeout",
-                         default=None,
-                         help="timeout for connection to metastore server, " +
-                              "uses Thrift's default")
-other_options.add_option("--separator", action="store", type="string", dest="separator",
-                         default="\t",
-                         help="the separator between db_name, table_name, and " +
-                              "part_name in the file passed in, the default " +
-                              "is tab")
-
-parser.add_option_group(other_options)
-
-(options, args) = parser.parse_args()
-
-host = options.host
-port = options.port
-timeout = options.timeout
-file = options.file
-separator = options.separator
-
-# Prepare the Thrift connection to the metastore
-
-_socket = TSocket.TSocket(host, port)
-_socket.setTimeout(timeout)
-_transport = TTransport.TBufferedTransport(_socket)
-_protocol = TBinaryProtocol.TBinaryProtocol(_transport)
-
-client = ThriftHiveMetastore.Client(_protocol)
-_transport.open()
-
-# Iterate over the file of partitions
-
-partition_file = open(file, 'r')
-db_name = ''
-table_name = ''
-part_name = ''
-
-for line in partition_file:
-
-    line = line.rstrip("\n\r")
-    (db_name, table_name, part_name) = line.split(separator)
-
-    print line
-
-    # Get the partition associated with this line
-
-    partition = client.get_partition_by_name(db_name, table_name, part_name)
-
-    # Drop it
-
-    client.drop_partition_by_name(db_name, table_name, part_name, 0)
-
-    # Add it back
-
-    client.add_partition(partition)
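
[Editor's note] The script header warns that partition names containing escaped characters (ASCII 1-31, 127 and " # % ' * / : = ? \ { [ ]) cannot be migrated automatically. A minimal pre-flight check along those lines is sketched below; it is not part of the patch, and is_migratable/check_file are hypothetical helper names. It assumes the <sep>-separated db_name/tbl_name/part_name file format described in the header, and uses the same Python 2 dialect as the script.

#!/usr/local/bin/python
# Sketch (not part of the patch): validate the partition list file before
# running the upgrade script. SPECIAL mirrors the character list from the
# NOTE in the script header above.

import sys

SPECIAL = set('"#%\'*/:=?\\{[]') | set(chr(c) for c in range(1, 32)) | set(chr(127))

def is_migratable(part_name):
    # True if the upgrade script can safely drop and re-add this partition
    return not any(ch in SPECIAL for ch in part_name)

def check_file(path, separator='\t'):
    bad = []
    for lineno, line in enumerate(open(path), 1):
        db_name, tbl_name, part_name = line.rstrip('\n\r').split(separator)
        if not is_migratable(part_name):
            bad.append((lineno, db_name, tbl_name, part_name))
    return bad

if __name__ == '__main__':
    for lineno, db, tbl, part in check_file(sys.argv[1]):
        print 'line %d: %s.%s partition %s needs manual handling' % (lineno, db, tbl, part)
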
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(revision 1328473)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(working copy)
@@ -1243,12 +1243,11 @@
             "Unable to add partition because table or database do not exist");
       }
 
-      if (tbl.getTableType().equals(TableType.VIRTUAL_VIEW.toString()) && part.getSd() == null) {
-        part.setSd(tbl.getSd().deepCopy());
+      String partLocationStr = null;
+      if (part.getSd() != null) {
+        partLocationStr = part.getSd().getLocation();
       }
 
-      String partLocationStr = part.getSd().getLocation();
-
       if (partLocationStr == null || partLocationStr.isEmpty()) {
         // set default location if not specified and this is
         // a physical table partition (not a view)
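
[Editor's note] With part.getSd() now checked for null, add_partition can accept a view partition that carries no storage descriptor instead of throwing. A sketch of that call through the Thrift client, reusing the connection boilerplate from the upgrade script; the endpoint and the view and partition names are placeholders, not part of the patch.

# Sketch (not part of the patch): add a storage-descriptor-less partition
# to a partitioned view via the metastore Thrift server, which the null
# check above makes possible.
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_metastore import ThriftHiveMetastore
from hive_metastore.ttypes import Partition

transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
transport.open()

part = Partition()
part.dbName = 'default'
part.tableName = 'view_partitioned'  # a view PARTITIONED ON (value)
part.values = ['val_86']             # one value per partition column
part.sd = None                       # view partitions have no storage descriptor

client.add_partition(part)
transport.close()
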
Index: ql/src/test/results/clientpositive/describe_formatted_view_partitioned_json.q.out
===================================================================
--- ql/src/test/results/clientpositive/describe_formatted_view_partitioned_json.q.out	(revision 0)
+++ ql/src/test/results/clientpositive/describe_formatted_view_partitioned_json.q.out	(revision 0)
@@ -0,0 +1,45 @@
+PREHOOK: query: DROP VIEW view_partitioned
+PREHOOK: type: DROPVIEW
+POSTHOOK: query: DROP VIEW view_partitioned
+POSTHOOK: type: DROPVIEW
+PREHOOK: query: CREATE VIEW view_partitioned
+PARTITIONED ON (value)
+AS
+SELECT key, value
+FROM src
+WHERE key=86
+PREHOOK: type: CREATEVIEW
+#### A masked pattern was here ####
+POSTHOOK: query: CREATE VIEW view_partitioned
+PARTITIONED ON (value)
+AS
+SELECT key, value
+FROM src
+WHERE key=86
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Output: default@view_partitioned
+#### A masked pattern was here ####
+PREHOOK: query: ALTER VIEW view_partitioned
+ADD PARTITION (value='val_86')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Input: default@src
+PREHOOK: Input: default@view_partitioned
+POSTHOOK: query: ALTER VIEW view_partitioned
+ADD PARTITION (value='val_86')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@view_partitioned
+POSTHOOK: Output: default@view_partitioned@value=val_86
+PREHOOK: query: DESCRIBE FORMATTED view_partitioned PARTITION (value='val_86')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: DESCRIBE FORMATTED view_partitioned PARTITION (value='val_86')
+POSTHOOK: type: DESCTABLE
+{"columns":[{"name":"key","type":"string"}]}
+PREHOOK: query: DROP VIEW view_partitioned
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@view_partitioned
+PREHOOK: Output: default@view_partitioned
+POSTHOOK: query: DROP VIEW view_partitioned
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@view_partitioned
+POSTHOOK: Output: default@view_partitioned
Index: ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out
===================================================================
--- ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out	(revision 1328473)
+++ ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out	(working copy)
@@ -52,15 +52,6 @@
 #### A masked pattern was here ####
 Partition Parameters:
 #### A masked pattern was here ####
-
-# Storage Information
-SerDe Library:       null
-InputFormat:         org.apache.hadoop.mapred.SequenceFileInputFormat
-OutputFormat:        org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-Compressed:          No
-Num Buckets:         -1
-Bucket Columns:      []
-Sort Columns:        []
 PREHOOK: query: DROP VIEW view_partitioned
 PREHOOK: type: DROPVIEW
 PREHOOK: Input: default@view_partitioned
Index: ql/src/test/queries/clientpositive/describe_formatted_view_partitioned_json.q
===================================================================
--- ql/src/test/queries/clientpositive/describe_formatted_view_partitioned_json.q	(revision 0)
+++ ql/src/test/queries/clientpositive/describe_formatted_view_partitioned_json.q	(revision 0)
@@ -0,0 +1,17 @@
+set hive.ddl.output.format=json;
+
+DROP VIEW view_partitioned;
+
+CREATE VIEW view_partitioned
+PARTITIONED ON (value)
+AS
+SELECT key, value
+FROM src
+WHERE key=86;
+
+ALTER VIEW view_partitioned
+ADD PARTITION (value='val_86');
+
+DESCRIBE FORMATTED view_partitioned PARTITION (value='val_86');
+
+DROP VIEW view_partitioned;
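
[Editor's note] The JSON variant of the test expects DESCRIBE FORMATTED on a view partition to emit a single JSON document ({"columns":[...]}). A sketch of consuming that output programmatically, assuming a hive CLI on the PATH and the view_partitioned view created by the test; taking the last non-empty stdout line is a defensive guess about what else the CLI may print.

# Sketch (not part of the patch): parse the JSON DESCRIBE output that the
# new test exercises. Assumes a 'hive' CLI on the PATH and the
# view_partitioned view from the test above.
import json
import subprocess

out = subprocess.check_output([
    'hive', '-e',
    "set hive.ddl.output.format=json;"
    "DESCRIBE FORMATTED view_partitioned PARTITION (value='val_86');",
])

# Assume the JSON document is the last non-empty line of stdout.
doc = [l for l in out.splitlines() if l.strip()][-1]
desc = json.loads(doc)
print [col['name'] for col in desc['columns']]  # e.g. ['key']
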
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java	(revision 1328473)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java	(working copy)
@@ -25,6 +25,7 @@
 import java.util.Map;
 
 import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
@@ -147,8 +148,10 @@
     getPartitionMetaDataInformation(tableInfo, part);
 
     // Storage information.
-    tableInfo.append(LINE_DELIM).append("# Storage Information").append(LINE_DELIM);
-    getStorageDescriptorInfo(tableInfo, part.getTPartition().getSd());
+    if (part.getTable().getTableType() != TableType.VIRTUAL_VIEW) {
+      tableInfo.append(LINE_DELIM).append("# Storage Information").append(LINE_DELIM);
+      getStorageDescriptorInfo(tableInfo, part.getTPartition().getSd());
+    }
 
     return tableInfo.toString();
   }
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java	(revision 1328473)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java	(working copy)
@@ -191,10 +191,11 @@
     // Pick the formatter to use to display the results.  Either the
     // normal human readable output or a json object.
     if ("json".equals(conf.get(
-          HiveConf.ConfVars.HIVE_DDL_OUTPUT_FORMAT.varname, "text")))
+          HiveConf.ConfVars.HIVE_DDL_OUTPUT_FORMAT.varname, "text"))) {
       formatter = new JsonMetaDataFormatter();
-    else
+    } else {
       formatter = new TextMetaDataFormatter();
+    }
 
     INTERMEDIATE_ARCHIVED_DIR_SUFFIX =
       HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED);
@@ -2361,8 +2362,9 @@
           formatter.MISSING);
     } else {
       Map<String, String> params = null;
-      if(descDatabase.isExt())
-        params = database.getParameters();
+      if(descDatabase.isExt()) {
+        params = database.getParameters();
+      }
 
       formatter.showDatabaseDescription(outStream,
                                         database.getName(),
@@ -2588,7 +2590,8 @@
 
     List<FieldSchema> cols = null;
     if (colPath.equals(tableName)) {
-      cols = (part == null) ? tbl.getCols() : part.getCols();
+      cols = (part == null || tbl.getTableType() == TableType.VIRTUAL_VIEW) ?
+          tbl.getCols() : part.getCols();
       if (!descTbl.isFormatted()) {
         if (tableName.equals(colPath)) {
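
[Editor's note] The DDLTask change makes DESCRIBE source column metadata from the table rather than the partition whenever the table is a view, since a view partition has no storage descriptor of its own to read columns from. The same fallback, sketched against the Thrift metastore client used by the upgrade script; get_partition_cols is a hypothetical helper, not part of the patch.

# Sketch (not part of the patch): the column-lookup fallback from the
# DDLTask change, expressed against the Thrift metastore client. Columns
# for a view partition come from the table; for a physical table they
# come from the partition's storage descriptor.
def get_partition_cols(client, db_name, tbl_name, part_name):
    tbl = client.get_table(db_name, tbl_name)
    if tbl.tableType == 'VIRTUAL_VIEW':
        return tbl.sd.cols  # view partitions carry no sd of their own
    part = client.get_partition_by_name(db_name, tbl_name, part_name)
    return part.sd.cols
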