Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/MetaDataFormatUtils.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/MetaDataFormatUtils.java	(revision )
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/MetaDataFormatUtils.java	(revision )
@@ -0,0 +1,122 @@
+package org.apache.hadoop.hive.ql.metadata;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+
+import java.util.Date;
+import java.util.Map;
+
+public class MetaDataFormatUtils {
+
+  public static final String FIELD_DELIM = "\t";
+  public static final String LINE_DELIM = "\n";
+
+  public static String getAllColumnsInformation(Table tbl) {
+    StringBuilder columnInformation = new StringBuilder(1024);
+    formatOutput("Column", "Type", "Comments", columnInformation);
+    formatOutput("------", "----", "--------", columnInformation);
+    getColumnInfoAsString(columnInformation, tbl);
+
+    // Partitions
+    if (tbl.isPartitioned()) {
+      columnInformation.append(LINE_DELIM);
+      formatOutput("Partition", "Type", "Comments", columnInformation);
+      formatOutput("---------", "----", "--------", columnInformation);
+      getPartitionInfoAsString(columnInformation, tbl);
+    }
+    return columnInformation.toString();
+  }
+
+  private static void getColumnInfoAsString(StringBuilder tableInfo, Table tbl) {
+    for (FieldSchema col : tbl.getCols()) {
+      formatFieldSchemas(tableInfo, col);
+    }
+  }
+
+  private static void getPartitionInfoAsString(StringBuilder tableInfo, Table tbl) {
+    for (FieldSchema partition : tbl.getPartCols()) {
+      formatFieldSchemas(tableInfo, partition);
+    }
+  }
+
+  public static String displayTableInformation(Table tbl) {
+    StringBuilder tableInfo = new StringBuilder(2048);
+
+    // Table Metadata
+    tableInfo.append("# Detailed Information").append(LINE_DELIM);
+    getTableMetaDataInformation(tableInfo, tbl);
+
+    // Storage information.
+    tableInfo.append(LINE_DELIM).append("# Storage Information").append(LINE_DELIM);
+    getStorageDescriptorInfo(tableInfo, tbl);
+
+    if (tbl.isView()) {
+      tableInfo.append(LINE_DELIM).append("# View Information").append(LINE_DELIM);
+      getViewInfo(tableInfo, tbl);
+    }
+
+    return tableInfo.toString();
+  }
+
+  private static void getViewInfo(StringBuilder tableInfo, Table tbl) {
+    formatOutput("View Original Text:", tbl.getViewOriginalText(), tableInfo);
+    formatOutput("View Expanded Text:", tbl.getViewExpandedText(), tableInfo);
+  }
+
+  private static void getStorageDescriptorInfo(StringBuilder tableInfo, Table tbl) {
+    formatOutput("SerDe:", tbl.getSerializationLib(), tableInfo);
+    formatOutput("InputFormat:", tbl.getInputFormatClass().getName(), tableInfo);
+    formatOutput("OutputFormat:", tbl.getOutputFormatClass().getName(), tableInfo);
+    formatOutput("Sort Columns:", tbl.getSortCols().toString(), tableInfo);
+    formatOutput("Bucket Columns:", tbl.getBucketCols().toString(), tableInfo);
+
+    if (tbl.getTTable().getSd().getSerdeInfo().getParametersSize() > 0) {
+      tableInfo.append("Storage Desc Params:").append(LINE_DELIM);
+      displayAllParameters(tbl.getTTable().getSd().getSerdeInfo().getParameters(), tableInfo);
+    }
+  }
+
+  private static void getTableMetaDataInformation(StringBuilder tableInfo, Table tbl) {
+    formatOutput("Database:", tbl.getDbName(), tableInfo);
+    formatOutput("Owner:", tbl.getOwner(), tableInfo);
+    formatOutput("CreateTime:", formatDate(tbl.getTTable().getCreateTime()), tableInfo);
+    formatOutput("LastAccessTime:", formatDate(tbl.getTTable().getLastAccessTime()), tableInfo);
+    // Check for null before calling toString() on the protect mode.
+    String protectMode = tbl.getProtectMode() == null ? "None" : tbl.getProtectMode().toString();
+    formatOutput("Protect Mode:", protectMode, tableInfo);
+    formatOutput("Retention:", Integer.toString(tbl.getRetention()), tableInfo);
+    formatOutput("Location:", tbl.getDataLocation().toString(), tableInfo);
+
+    if (tbl.getParameters().size() > 0) {
+      tableInfo.append("Table Parameters:").append(LINE_DELIM);
+      displayAllParameters(tbl.getParameters(), tableInfo);
+    }
+  }
+
+  private static void displayAllParameters(Map<String, String> params, StringBuilder tableInfo) {
+    for (Map.Entry<String, String> parameter : params.entrySet()) {
+      tableInfo.append(FIELD_DELIM); // Ensures all params are indented.
+      formatOutput(parameter.getKey(), parameter.getValue(), tableInfo);
+    }
+  }
+
+  private static void formatFieldSchemas(StringBuilder tableInfo, FieldSchema col) {
+    String comment = col.getComment() != null ?
+        col.getComment() : "None";
+    formatOutput(col.getName(), col.getType(), comment, tableInfo);
+  }
+
+  private static String formatDate(long timeInSeconds) {
+    // The metastore stores times in seconds; java.util.Date expects milliseconds.
+    Date date = new Date(timeInSeconds * 1000);
+    return date.toString();
+  }
+
+  private static void formatOutput(String name, String value, StringBuilder tableInfo) {
+    tableInfo.append(String.format("%-20s", name)).append(FIELD_DELIM);
+    tableInfo.append(String.format("%-15s", value)).append(LINE_DELIM);
+  }
+
+  private static void formatOutput(String col1, String col2, String col3, StringBuilder tableInfo) {
+    tableInfo.append(String.format("%-20s", col1)).append(FIELD_DELIM);
+    tableInfo.append(String.format("%-20s", col2)).append(FIELD_DELIM);
+    tableInfo.append(String.format("%-15s", col3)).append(LINE_DELIM);
+  }
+}
\ No newline at end of file
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java	(revision 985234)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java	(revision )
@@ -62,14 +62,7 @@
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.metadata.CheckResult;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker;
-import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
-import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.metadata.*;
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
@@ -1380,33 +1373,10 @@
     Path resFile = new Path(descTbl.getResFile());
     FileSystem fs = resFile.getFileSystem(conf);
     DataOutput outStream = fs.create(resFile);
-    Iterator<FieldSchema> iterCols = cols.iterator();
-    while (iterCols.hasNext()) {
-      // create a row per column
-      FieldSchema col = iterCols.next();
-      outStream.writeBytes(col.getName());
-      outStream.write(separator);
-      outStream.writeBytes(col.getType());
-      outStream.write(separator);
-      outStream.writeBytes(col.getComment() == null ? "" : col.getComment());
-      outStream.write(terminator);
-    }
+    outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(tbl));
 
     if (tableName.equals(colPath)) {
-      // also return the partitioning columns
-      List<FieldSchema> partCols = tbl.getPartCols();
-      Iterator<FieldSchema> iterPartCols = partCols.iterator();
-      while (iterPartCols.hasNext()) {
-        FieldSchema col = iterPartCols.next();
-        outStream.writeBytes(col.getName());
-        outStream.write(separator);
-        outStream.writeBytes(col.getType());
-        outStream.write(separator);
-        outStream.writeBytes(col.getComment() == null ? "" : col.getComment());
-        outStream.write(terminator);
-      }
-
+      // if this is an extended describe, show the complete details of the table
       if (descTbl.isExt()) {
         // add empty line
@@ -1421,11 +1391,7 @@
         outStream.write(terminator);
       } else {
         // show table information
-        outStream.writeBytes("Detailed Table Information");
-        outStream.write(separator);
-        outStream.writeBytes(tbl.getTTable().toString());
-        outStream.write(separator);
-        // comment column is empty
+        outStream.writeBytes(MetaDataFormatUtils.displayTableInformation(tbl));
         outStream.write(terminator);
       }
     }
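
Reviewer note: the standalone sketch below is not part of the patch; it only illustrates the left-justified padding idiom that MetaDataFormatUtils builds its output from, using the same %-20s/%-15s format strings and tab/newline delimiters. The class name FormatSketch and the sample rows are hypothetical, for illustration only.

// Illustrative only -- mirrors the formatOutput(...) idiom from the patch.
// The column names and types below are made-up sample data.
public class FormatSketch {
  private static final String FIELD_DELIM = "\t";
  private static final String LINE_DELIM = "\n";

  // Same signature and format strings as the three-argument formatOutput in the patch.
  private static void formatOutput(String col1, String col2, String col3, StringBuilder out) {
    out.append(String.format("%-20s", col1)).append(FIELD_DELIM);
    out.append(String.format("%-20s", col2)).append(FIELD_DELIM);
    out.append(String.format("%-15s", col3)).append(LINE_DELIM);
  }

  public static void main(String[] args) {
    StringBuilder sb = new StringBuilder();
    formatOutput("Column", "Type", "Comments", sb);
    formatOutput("------", "----", "--------", sb);
    formatOutput("user_id", "bigint", "None", sb);     // sample row
    formatOutput("signup_date", "string", "None", sb); // sample row
    System.out.print(sb);
    // Each field is padded to a fixed width and left-justified, then the fields
    // are joined with tabs, so the rows of `describe` output line up in columns:
    //
    // Column              	Type                	Comments
    // ------              	----                	--------
    // user_id             	bigint              	None
    // signup_date         	string              	None
  }
}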