Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/MetaDataFormatUtils.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/MetaDataFormatUtils.java (revision )
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/MetaDataFormatUtils.java (revision )
@@ -0,0 +1,191 @@
+package org.apache.hadoop.hive.ql.metadata;
+
+import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.ql.plan.DescTableDesc;
+
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Utility methods for formatting table, partition and column metadata into
+ * the human-readable output produced by DESCRIBE.
+ */
+public final class MetaDataFormatUtils {
+
+  public static final String FIELD_DELIM = "\t";
+  public static final String LINE_DELIM = "\n";
+
+  private static final int DEFAULT_STRINGBUILDER_SIZE = 2048;
+  private static final int ALIGNMENT = 20;
+
+  private MetaDataFormatUtils() {
+  }
+
+  public static String getAllColumnsInformation(Table table) {
+
+    StringBuilder columnInformation = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
+    formatColumnsHeader(columnInformation);
+    formatAllFields(columnInformation, table.getCols());
+
+    // Partitions
+    if (table.isPartitioned()) {
+      columnInformation.append(LINE_DELIM).append("# Partition Information")
+          .append(LINE_DELIM);
+      formatColumnsHeader(columnInformation);
+      formatAllFields(columnInformation, table.getPartCols());
+    }
+    return columnInformation.toString();
+  }
+
+  private static void formatColumnsHeader(StringBuilder columnInformation) {
+    formatOutput(getColumnsHeader(), columnInformation);
+    columnInformation.append(LINE_DELIM);
+  }
+
+  public static String getAllColumnsInformation(List<FieldSchema> cols) {
+    StringBuilder columnInformation = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
+    formatColumnsHeader(columnInformation);
+    formatAllFields(columnInformation, cols);
+    return columnInformation.toString();
+  }
+
+  private static void formatAllFields(StringBuilder tableInfo, List<FieldSchema> cols) {
+    for (FieldSchema col : cols) {
+      formatFieldSchemas(tableInfo, col);
+    }
+  }
+
+  public static String getPartitionInformation(Partition part) {
+    StringBuilder tableInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
+
+    // Partition Metadata
+    tableInfo.append("# Detailed Partition Information").append(LINE_DELIM);
+    getPartitionMetaDataInformation(tableInfo, part);
+
+    // Storage information.
+    tableInfo.append(LINE_DELIM).append("# Storage Information").append(LINE_DELIM);
+    getStorageDescriptorInfo(tableInfo, part.getTPartition().getSd());
+
+    return tableInfo.toString();
+  }
+
+  public static String getTableInformation(Table table) {
+    StringBuilder tableInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
+
+    // Table Metadata
+    tableInfo.append("# Detailed Table Information").append(LINE_DELIM);
+    getTableMetaDataInformation(tableInfo, table);
+
+    // Storage information.
+    tableInfo.append(LINE_DELIM).append("# Storage Information").append(LINE_DELIM);
+    getStorageDescriptorInfo(tableInfo, table.getTTable().getSd());
+
+    if (table.isView()) {
+      tableInfo.append(LINE_DELIM).append("# View Information").append(LINE_DELIM);
+      getViewInfo(tableInfo, table);
+    }
+
+    return tableInfo.toString();
+  }
+
+  private static void getViewInfo(StringBuilder tableInfo, Table tbl) {
+    formatOutput("View Original Text:", tbl.getViewOriginalText(), tableInfo);
+    formatOutput("View Expanded Text:", tbl.getViewExpandedText(), tableInfo);
+  }
+
+  private static void getStorageDescriptorInfo(StringBuilder tableInfo,
+      StorageDescriptor storageDesc) {
+
+    formatOutput("SerDe Library:", storageDesc.getSerdeInfo().getSerializationLib(), tableInfo);
+    formatOutput("InputFormat:", storageDesc.getInputFormat(), tableInfo);
+    formatOutput("OutputFormat:", storageDesc.getOutputFormat(), tableInfo);
+    formatOutput("Compressed:", storageDesc.isCompressed() ? "Yes" : "No", tableInfo);
+    formatOutput("Num Buckets:", String.valueOf(storageDesc.getNumBuckets()), tableInfo);
+    formatOutput("Bucket Columns:", storageDesc.getBucketCols().toString(), tableInfo);
+    formatOutput("Sort Columns:", storageDesc.getSortCols().toString(), tableInfo);
+
+    if (storageDesc.getSerdeInfo().getParametersSize() > 0) {
+      tableInfo.append("Storage Desc Params:").append(LINE_DELIM);
+      displayAllParameters(storageDesc.getSerdeInfo().getParameters(), tableInfo);
+    }
+  }
+
+  private static void getTableMetaDataInformation(StringBuilder tableInfo, Table tbl) {
+    formatOutput("Database:", tbl.getDbName(), tableInfo);
+    formatOutput("Owner:", tbl.getOwner(), tableInfo);
+    formatOutput("CreateTime:", formatDate(tbl.getTTable().getCreateTime()), tableInfo);
+    formatOutput("LastAccessTime:", formatDate(tbl.getTTable().getLastAccessTime()), tableInfo);
+    // Guard the ProtectMode object itself; toString() can never return null.
+    String protectMode = tbl.getProtectMode() == null ? null : tbl.getProtectMode().toString();
+    formatOutput("Protect Mode:", protectMode == null ? "None" : protectMode, tableInfo);
+    formatOutput("Retention:", Integer.toString(tbl.getRetention()), tableInfo);
+    formatOutput("Location:", tbl.getDataLocation().toString(), tableInfo);
+    formatOutput("Table Type:", tbl.getTableType().name(), tableInfo);
+
+    if (tbl.getParameters().size() > 0) {
+      tableInfo.append("Table Parameters:").append(LINE_DELIM);
+      displayAllParameters(tbl.getParameters(), tableInfo);
+    }
+  }
+
+  private static void getPartitionMetaDataInformation(StringBuilder tableInfo, Partition part) {
+    formatOutput("Partition Value:", part.getValues().toString(), tableInfo);
+    formatOutput("Database:", part.getTPartition().getDbName(), tableInfo);
+    formatOutput("Table:", part.getTable().getTableName(), tableInfo);
+    formatOutput("CreateTime:", formatDate(part.getTPartition().getCreateTime()), tableInfo);
+    formatOutput("LastAccessTime:", formatDate(part.getTPartition().getLastAccessTime()),
+        tableInfo);
+    String protectMode = part.getProtectMode() == null ? null : part.getProtectMode().toString();
+    formatOutput("Protect Mode:", protectMode == null ? "None" : protectMode, tableInfo);
+    formatOutput("Location:", part.getLocation(), tableInfo);
+
+    if (part.getTPartition().getParameters().size() > 0) {
+      tableInfo.append("Partition Parameters:").append(LINE_DELIM);
+      displayAllParameters(part.getTPartition().getParameters(), tableInfo);
+    }
+  }
+
+  private static void displayAllParameters(Map<String, String> params, StringBuilder tableInfo) {
+    for (Map.Entry<String, String> parameter : params.entrySet()) {
+      tableInfo.append(FIELD_DELIM); // Ensures all params are indented.
+      formatOutput(parameter.getKey(), StringEscapeUtils.escapeJava(parameter.getValue()),
+          tableInfo);
+    }
+  }
+
+  private static void formatFieldSchemas(StringBuilder tableInfo, FieldSchema col) {
+    String comment = col.getComment() != null ? col.getComment() : "None";
+    formatOutput(col.getName(), col.getType(), comment, tableInfo);
+  }
+
+  private static String formatDate(long timeInSeconds) {
+    Date date = new Date(timeInSeconds * 1000);
+    return date.toString();
+  }
+
+  private static void formatOutput(String[] fields, StringBuilder tableInfo) {
+    for (String field : fields) {
+      tableInfo.append(String.format("%-" + ALIGNMENT + "s", field)).append(FIELD_DELIM);
+    }
+    tableInfo.append(LINE_DELIM);
+  }
+
+  private static void formatOutput(String name, String value,
+      StringBuilder tableInfo) {
+    tableInfo.append(String.format("%-" + ALIGNMENT + "s", name)).append(FIELD_DELIM);
+    tableInfo.append(String.format("%-" + ALIGNMENT + "s", value)).append(LINE_DELIM);
+  }
+
+  private static void formatOutput(String col1, String col2, String col3,
+      StringBuilder tableInfo) {
+    tableInfo.append(String.format("%-" + ALIGNMENT + "s", col1)).append(FIELD_DELIM);
+    tableInfo.append(String.format("%-" + ALIGNMENT + "s", col2)).append(FIELD_DELIM);
+    tableInfo.append(String.format("%-" + ALIGNMENT + "s", col3)).append(LINE_DELIM);
+  }
+
+  public static String[] getColumnsHeader() {
+    return DescTableDesc.getSchema().split("#")[0].split(",");
+  }
+}
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 990026)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision )
@@ -68,14 +68,7 @@
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.metadata.CheckResult;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker;
-import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
-import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.metadata.*;
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
@@ -1615,64 +1608,31 @@
     LOG.info("DDLTask: got data for " + tbl.getTableName());
 
-    List<FieldSchema> cols = null;
-    if (colPath.equals(tableName)) {
-      cols = tbl.getCols();
-      if (part != null) {
-        cols = part.getCols();
-      }
-    } else {
-      cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer());
-    }
-    Path resFile = new Path(descTbl.getResFile());
-    FileSystem fs = resFile.getFileSystem(conf);
-    DataOutput outStream = fs.create(resFile);
+    Path resFile = new Path(descTbl.getResFile());
+    FileSystem fs = resFile.getFileSystem(conf);
+    DataOutput outStream = fs.create(resFile);
 
-    Iterator<FieldSchema> iterCols = cols.iterator();
-    while (iterCols.hasNext()) {
-      // create a row per column
-      FieldSchema col = iterCols.next();
-      outStream.writeBytes(col.getName());
-      outStream.write(separator);
-      outStream.writeBytes(col.getType());
-      outStream.write(separator);
-      outStream.writeBytes(col.getComment() == null ? "" : col.getComment());
"" : col.getComment()); - outStream.write(terminator); - } - if (tableName.equals(colPath)) { - // also return the partitioning columns - List partCols = tbl.getPartCols(); - Iterator iterPartCols = partCols.iterator(); - while (iterPartCols.hasNext()) { - FieldSchema col = iterPartCols.next(); - outStream.writeBytes(col.getName()); - outStream.write(separator); - outStream.writeBytes(col.getType()); - outStream.write(separator); - outStream - .writeBytes(col.getComment() == null ? "" : col.getComment()); - outStream.write(terminator); + if (colPath.equals(tableName)) { + outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(tbl)); + } else { + List cols = null; + cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer()); + outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(cols)); - } + } + if (tableName.equals(colPath)) { // if extended desc table then show the complete details of the table if (descTbl.isExt()) { // add empty line outStream.write(terminator); if (part != null) { // show partition information - outStream.writeBytes("Detailed Partition Information"); - outStream.write(separator); - outStream.writeBytes(part.getTPartition().toString()); - outStream.write(separator); + outStream.writeBytes(MetaDataFormatUtils.getPartitionInformation(part)); // comment column is empty outStream.write(terminator); } else { // show table information - outStream.writeBytes("Detailed Table Information"); - outStream.write(separator); - outStream.writeBytes(tbl.getTTable().toString()); - outStream.write(separator); - // comment column is empty + outStream.writeBytes(MetaDataFormatUtils.getTableInformation(tbl)); outStream.write(terminator); } } Index: ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java (revision 910647) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java (revision ) @@ -69,7 +69,7 @@ return table; } - public String getSchema() { + public static String getSchema() { return schema; }