diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index b0f5c49..0206477 100644
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -834,7 +834,7 @@
HIVE_DRIVER_RUN_HOOKS("hive.exec.driver.run.hooks", ""),
HIVE_DDL_OUTPUT_FORMAT("hive.ddl.output.format", null),
HIVE_ENTITY_SEPARATOR("hive.entity.separator", "@"),
-
+ HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY("hive.display.partition.cols.separately", true),
HIVE_SERVER2_MAX_START_ATTEMPTS("hive.server2.max.start.attempts", 30L,
new LongRangeValidator(0L, Long.MAX_VALUE)),
diff --git conf/hive-default.xml.template conf/hive-default.xml.template
index a8da2ca..8aaf3a0 100644
--- conf/hive-default.xml.template
+++ conf/hive-default.xml.template
@@ -1876,6 +1876,17 @@
+<property>
+  <name>hive.display.partition.cols.separately</name>
+  <value>true</value>
+  <description>
+    In older Hive versions (0.10 and earlier) no distinction was made between
+    partition columns and non-partition columns when displaying columns in
+    describe table output. From 0.12 onwards they are displayed separately. This
+    flag lets you get the old behavior, if desired. See the test case in the patch for HIVE-6689.
+  </description>
+</property>
+
  <name>hive.transform.escape.input</name>
  <value>false</value>
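
A minimal usage sketch (not part of the patch; the table name sample_tbl and its partition column dt are hypothetical) showing how the new flag is toggled per session, mirroring the test case added below:

    create table sample_tbl (a int) partitioned by (dt string);
    describe sample_tbl;   -- default: partition columns are repeated under a "# Partition Information" section
    set hive.display.partition.cols.separately=false;
    describe sample_tbl;   -- pre-0.12 behavior: partition columns appear only in the plain column list

The default of true keeps the newer output; deployments that parse describe output can flip the flag back without losing the partition columns themselves.
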
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
index de04cca..de47b21 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
@@ -29,6 +29,7 @@
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Index;
@@ -92,14 +93,14 @@ public static String getAllColumnsInformation(List<FieldSchema> cols,
* @return string with formatted column information
*/
public static String getAllColumnsInformation(List<FieldSchema> cols,
- List<FieldSchema> partCols, boolean printHeader, boolean isOutputPadded) {
+ List<FieldSchema> partCols, boolean printHeader, boolean isOutputPadded, boolean showPartColsSep) {
StringBuilder columnInformation = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
if(printHeader){
formatColumnsHeader(columnInformation);
}
formatAllFields(columnInformation, cols, isOutputPadded);
- if ((partCols != null) && (!partCols.isEmpty())) {
+ if ((partCols != null) && !partCols.isEmpty() && showPartColsSep) {
columnInformation.append(LINE_DELIM).append("# Partition Information")
.append(LINE_DELIM);
formatColumnsHeader(columnInformation);
@@ -371,7 +372,7 @@ public static MetaDataFormatter getFormatter(HiveConf conf) {
if ("json".equals(conf.get(HiveConf.ConfVars.HIVE_DDL_OUTPUT_FORMAT.varname, "text"))) {
return new JsonMetaDataFormatter();
} else {
- return new TextMetaDataFormatter(conf.getIntVar(HiveConf.ConfVars.CLIPRETTYOUTPUTNUMCOLS));
+ return new TextMetaDataFormatter(conf.getIntVar(HiveConf.ConfVars.CLIPRETTYOUTPUTNUMCOLS), conf.getBoolVar(ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY));
}
}
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
index 0c49250..ccdff17 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
@@ -57,9 +57,11 @@
* If -1, then the current terminal width is auto-detected and used.
*/
private final int prettyOutputNumCols;
+ private final boolean showPartColsSeparately;
- public TextMetaDataFormatter(int prettyOutputNumCols) {
+ public TextMetaDataFormatter(int prettyOutputNumCols, boolean partColsSeparately) {
this.prettyOutputNumCols = prettyOutputNumCols;
+ this.showPartColsSeparately = partColsSeparately;
}
/**
@@ -123,7 +125,7 @@ public void describeTable(DataOutputStream outStream, String colPath,
MetaDataPrettyFormatUtils.getAllColumnsInformation(
cols, partCols, prettyOutputNumCols)
:
- MetaDataFormatUtils.getAllColumnsInformation(cols, partCols, isFormatted, isOutputPadded);
+ MetaDataFormatUtils.getAllColumnsInformation(cols, partCols, isFormatted, isOutputPadded, showPartColsSeparately);
} else {
output = MetaDataFormatUtils.getAllColumnsInformation(cols, isFormatted, isOutputPadded);
}
diff --git ql/src/test/queries/clientpositive/desc_tbl_part_cols.q ql/src/test/queries/clientpositive/desc_tbl_part_cols.q
new file mode 100644
index 0000000..89e4931
--- /dev/null
+++ ql/src/test/queries/clientpositive/desc_tbl_part_cols.q
@@ -0,0 +1,7 @@
+create table t1 (a int, b string) partitioned by (c int, d string);
+describe t1;
+
+set hive.display.partition.cols.separately=false;
+describe t1;
+
+set hive.display.partition.cols.separately=true;
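
A possible extension of this test (not included in the patch) would also exercise the formatted output path, since describe formatted appears to flow through the same getAllColumnsInformation call changed above:

    set hive.display.partition.cols.separately=false;
    describe formatted t1;
    set hive.display.partition.cols.separately=true;
    describe formatted t1;
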
diff --git ql/src/test/results/clientpositive/desc_tbl_part_cols.q.out ql/src/test/results/clientpositive/desc_tbl_part_cols.q.out
new file mode 100644
index 0000000..826b559
--- /dev/null
+++ ql/src/test/results/clientpositive/desc_tbl_part_cols.q.out
@@ -0,0 +1,29 @@
+PREHOOK: query: create table t1 (a int, b string) partitioned by (c int, d string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table t1 (a int, b string) partitioned by (c int, d string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1
+PREHOOK: query: describe t1
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe t1
+POSTHOOK: type: DESCTABLE
+a int None
+b string None
+c int None
+d string None
+
+# Partition Information
+# col_name data_type comment
+
+c int None
+d string None
+PREHOOK: query: describe t1
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe t1
+POSTHOOK: type: DESCTABLE
+a int None
+b string None
+c int None
+d string None