diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 1ca8d31..b19bf45 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -2937,6 +2937,18 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       } catch (URISyntaxException e) {
         throw new HiveException(e);
       }
+    } else if (alterTbl.getOp() == AlterTableTypes.ALTERBUCKETNUM) {
+      if (part != null) {
+        if (part.getBucketCount() != alterTbl.getNumberBuckets()) {
+          part.setBucketCount(alterTbl.getNumberBuckets());
+          work.getOutputs().add(new WriteEntity(part));
+        }
+      } else {
+        if (tbl.getNumBuckets() != alterTbl.getNumberBuckets()) {
+          tbl.setNumBuckets(alterTbl.getNumberBuckets());
+          work.getOutputs().add(new WriteEntity(tbl));
+        }
+      }
     } else {
       formatter.consoleError(console,
                              "Unsupported Alter commnad",
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
index 962194e..6103d97 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
@@ -362,6 +362,10 @@ public class Partition implements Serializable {
      */
   }
 
+  public void setBucketCount(int newBucketNum) {
+    tPartition.getSd().setNumBuckets(newBucketNum);
+  }
+
   public List<String> getBucketCols() {
     return tPartition.getSd().getBucketCols();
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index f7257cd..8e413e7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -207,6 +207,8 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
         analyzeAlterTableSerdeProps(ast, tableName, partSpec);
       } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) {
         analyzeAlterTableRenamePart(ast, tableName, partSpec);
+      } else if (ast.getToken().getType() == HiveParser.TOK_TABLEBUCKETS) {
+        analyzeAlterTableBucketNum(ast, tableName, partSpec);
       }
       break;
     }
@@ -708,14 +710,9 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     // configured not to fail silently
     boolean throwException =
         !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT);
-    try {
-      Table tab = db.getTable(db.getCurrentDatabase(), tableName, throwException);
-      if (tab != null) {
-        inputs.add(new ReadEntity(tab));
-        outputs.add(new WriteEntity(tab));
-      }
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
+    Table tab = getTable(db.getCurrentDatabase(), tableName, throwException);
+    if (tab != null) {
+      outputs.add(new WriteEntity(tab));
     }
 
     DropTableDesc dropTblDesc = new DropTableDesc(
@@ -1085,15 +1082,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
 
   private void addInputsOutputsAlterTable(String tableName, HashMap<String, String> partSpec,
       AlterTableDesc desc) throws SemanticException {
-    Table tab = null;
-    try {
-      tab = db.getTable(db.getCurrentDatabase(), tableName, true);
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
-    }
-
-    inputs.add(new ReadEntity(tab));
-
+    Table tab = getTable(db.getCurrentDatabase(), tableName, true);
     if ((partSpec == null) || (partSpec.isEmpty())) {
       outputs.add(new WriteEntity(tab));
     }
@@ -1310,15 +1299,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
 
   private void analyzeAlterTableClusterSort(ASTNode ast) throws SemanticException {
     String tableName = getUnescapedName((ASTNode)ast.getChild(0));
-    Table tab = null;
-
-    try {
-      tab = db.getTable(db.getCurrentDatabase(), tableName, true);
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
-    }
-
-    inputs.add(new ReadEntity(tab));
+    Table tab = getTable(db.getCurrentDatabase(), tableName, true);
     outputs.add(new WriteEntity(tab));
 
     validateAlterTableType(tab, AlterTableTypes.ADDCLUSTERSORTCOLUMN);
@@ -1581,19 +1562,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       break;
     }
 
-    try {
-      Table tab = null;
-      if (dbName == null) {
-        tab = db.getTable(tableName, true);
-      }
-      else {
-        tab = db.getTable(dbName, tableName, true);
-      }
-      inputs.add(new ReadEntity(tab));
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
-    }
-
+    Table tab = getTable(dbName, tableName, true);
     showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), dbName, tableName);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
         showColumnsDesc), conf));
@@ -1892,17 +1861,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     if (newPartSpec == null) {
       throw new SemanticException("RENAME PARTITION Missing Destination" + ast);
     }
-    Table tab = null;
-    try {
-      tab = db.getTable(db.getCurrentDatabase(), tblName, false);
-      if (tab != null) {
-        inputs.add(new ReadEntity(tab));
-      } else {
-        throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
-      }
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
-    }
+    Table tab = getTable(db.getCurrentDatabase(), tblName, true);
     validateAlterTableType(tab, AlterTableTypes.RENAMEPARTITION);
 
     List<Map<String, String>> partSpecs = new ArrayList<Map<String, String>>();
@@ -1915,6 +1874,21 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
         renamePartitionDesc), conf));
   }
 
+  private void analyzeAlterTableBucketNum(ASTNode ast, String tblName,
+      HashMap<String, String> partSpec) throws SemanticException {
+    Table tab = getTable(db.getCurrentDatabase(), tblName, true);
+    if (tab.getBucketCols() == null) {
+      throw new SemanticException("Cannot change the number of buckets: table is not bucketed");
+    }
+    validateAlterTableType(tab, AlterTableTypes.ALTERBUCKETNUM);
+
+    int bucketNum = Integer.parseInt(ast.getChild(0).getText());
+    AlterTableDesc alterBucketNum = new AlterTableDesc(tblName, partSpec, bucketNum);
+
+    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
+        alterBucketNum), conf));
+  }
+
   private void analyzeAlterTableModifyCols(ASTNode ast,
       AlterTableTypes alterType) throws SemanticException {
     String tblName = getUnescapedName((ASTNode)ast.getChild(0));
@@ -1933,16 +1907,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     String tblName = getUnescapedName((ASTNode)ast.getChild(0));
     // get table metadata
     List partSpecs = getFullPartitionSpecs(ast);
-    Table tab = null;
-
-    try {
-      tab = db.getTable(db.getCurrentDatabase(), tblName, false);
-      if (tab != null) {
-        inputs.add(new ReadEntity(tab));
-      }
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
-    }
+    Table tab = getTable(db.getCurrentDatabase(), tblName, true);
     validateAlterTableType(tab, AlterTableTypes.DROPPARTITION, expectView);
 
     // Find out if all partition columns are strings. This is needed for JDO
@@ -1999,17 +1964,8 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       throws SemanticException {
     String tblName = getUnescapedName((ASTNode)ast.getChild(0));
-    boolean isView = false;
-    Table tab = null;
-    try {
-      tab = db.getTable(db.getCurrentDatabase(), tblName, false);
-      if (tab != null) {
-        inputs.add(new ReadEntity(tab));
-        isView = tab.isView();
-      }
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
-    }
+    Table tab = getTable(db.getCurrentDatabase(), tblName, true);
+    boolean isView = tab.isView();
     validateAlterTableType(tab, AlterTableTypes.ADDPARTITION, expectView);
 
     // partition name to value
@@ -2127,16 +2083,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       throws SemanticException {
     String tblName = getUnescapedName((ASTNode)ast.getChild(0));
-    Table tab;
-
-    try {
-      tab = db.getTable(db.getCurrentDatabase(), tblName, false);
-      if (tab != null) {
-        inputs.add(new ReadEntity(tab));
-      }
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
-    }
+    Table tab = getTable(db.getCurrentDatabase(), tblName, true);
     validateAlterTableType(tab, AlterTableTypes.TOUCH);
 
     // partition name to value
@@ -2172,15 +2119,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     // partition name to value
     List<Map<String, String>> partSpecs = getPartitionSpecs(ast);
-    Table tab = null;
-    try {
-      tab = db.getTable(db.getCurrentDatabase(), tblName, false);
-      if (tab != null) {
-        inputs.add(new ReadEntity(tab));
-      }
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
-    }
+    Table tab = getTable(db.getCurrentDatabase(), tblName, true);
     addTablePartsOutputs(tblName, partSpecs, true);
     validateAlterTableType(tab, AlterTableTypes.ARCHIVE);
@@ -2427,4 +2366,21 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       }
     }
   }
+
+  private Table getTable(String database, String tblName, boolean throwException)
+      throws SemanticException {
+    try {
+      Table tab = database == null ? db.getTable(tblName, false)
+          : db.getTable(database, tblName, false);
+      if (tab == null && throwException) {
+        throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
+      }
+      if (tab != null) {
+        inputs.add(new ReadEntity(tab));
+      }
+      return tab;
+    } catch (HiveException e) {
+      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
+    }
+  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
index e969fbe..045e6bd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
@@ -745,6 +745,7 @@ alterTblPartitionStatementSuffix
   | alterStatementSuffixMergeFiles
   | alterStatementSuffixSerdeProperties
   | alterStatementSuffixRenamePart
+  | alterStatementSuffixBucketNum
   ;
 
 alterStatementSuffixFileFormat
@@ -797,6 +798,12 @@ alterProtectModeMode
   | KW_READONLY -> ^(TOK_READONLY)
   ;
 
+alterStatementSuffixBucketNum
+@init { msgs.push(""); }
+@after { msgs.pop(); }
+  : KW_INTO num=Number KW_BUCKETS
+  -> ^(TOK_TABLEBUCKETS $num)
+  ;
 
 alterStatementSuffixClusterbySortby
 @init {msgs.push("alter cluster by sort by statement");}
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
index ad1a14c..992dbfd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
@@ -114,6 +114,9 @@ public final class SemanticAnalyzerFactory {
         HiveOperation.ALTERPARTITION_SERDEPROPERTIES });
     tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_RENAMEPART,
         new HiveOperation[] {null, HiveOperation.ALTERTABLE_RENAMEPART});
+    tablePartitionCommandType.put(HiveParser.TOK_TABLEBUCKETS,
+        new HiveOperation[] {HiveOperation.ALTERTABLE_BUCKETNUM,
+            HiveOperation.ALTERPARTITION_BUCKETNUM});
   }
 
   public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree)
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
index cc6c36d..8c7a0e5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
@@ -43,7 +43,7 @@ public class AlterTableDesc extends DDLDesc implements Serializable {
     RENAME, ADDCOLS, REPLACECOLS, ADDPROPS, ADDSERDE, ADDSERDEPROPS,
     ADDFILEFORMAT, ADDCLUSTERSORTCOLUMN, RENAMECOLUMN, ADDPARTITION,
     TOUCH, ARCHIVE, UNARCHIVE, ALTERPROTECTMODE, ALTERPARTITIONPROTECTMODE,
-    ALTERLOCATION, DROPPARTITION, RENAMEPARTITION
+    ALTERLOCATION, DROPPARTITION, RENAMEPARTITION, ALTERBUCKETNUM
   };
 
   public static enum ProtectModeType {
@@ -184,6 +184,13 @@ public class AlterTableDesc extends DDLDesc implements Serializable {
     this.partSpec = partSpec;
   }
 
+  public AlterTableDesc(String tableName, HashMap<String, String> partSpec, int numBuckets) {
+    op = AlterTableTypes.ALTERBUCKETNUM;
+    this.oldName = tableName;
+    this.partSpec = partSpec;
+    this.numberBuckets = numBuckets;
+  }
+
   @Explain(displayName = "new columns")
   public List getNewColsString() {
     return Utilities.getFieldSchemaString(getNewCols());
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
index cb54753..e4855cb 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
@@ -49,6 +49,10 @@ public enum HiveOperation {
   ALTERTABLE_SERDEPROPERTIES("ALTERTABLE_SERDEPROPERTIES", new Privilege[]{Privilege.ALTER_METADATA}, null),
   ALTERPARTITION_SERDEPROPERTIES("ALTERPARTITION_SERDEPROPERTIES", new Privilege[]{Privilege.ALTER_METADATA}, null),
   ALTERTABLE_CLUSTER_SORT("ALTERTABLE_CLUSTER_SORT", new Privilege[]{Privilege.ALTER_METADATA}, null),
+  ALTERTABLE_BUCKETNUM("ALTERTABLE_BUCKETNUM",
+      new Privilege[]{Privilege.ALTER_METADATA}, null),
+  ALTERPARTITION_BUCKETNUM("ALTERPARTITION_BUCKETNUM",
+      new Privilege[]{Privilege.ALTER_METADATA}, null),
   SHOWDATABASES("SHOWDATABASES", new Privilege[]{Privilege.SHOW_DATABASE}, null),
   SHOWTABLES("SHOWTABLES", null, null),
   SHOWCOLUMNS("SHOWCOLUMNS", null, null),
diff --git ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
index 89329a2..b6d1eb8 100644
--- ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
+++ ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
@@ -20,6 +20,20 @@ describe formatted tst1 partition (ds = '1');
 
 describe formatted tst1;
 
+-- Test changing bucket number of (table/partition)
+
+alter table tst1 into 4 buckets;
+
+describe formatted tst1;
+
+describe formatted tst1 partition (ds = '1');
+
+alter table tst1 partition (ds = '1') into 6 buckets;
+
+describe formatted tst1;
+
+describe formatted tst1 partition (ds = '1');
+
 -- Test adding sort order
 
 alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets;
diff --git ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
index d5979ab..0d7c490 100644
--- ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
+++ ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
@@ -207,6 +207,206 @@ Bucket Columns:	[key]
 Sort Columns:	[]
 Storage Desc Params:
 	serialization.format	1
+PREHOOK: query: -- Test changing bucket number of (table/partition)
+
+alter table tst1 into 4 buckets
+PREHOOK: type: ALTERTABLE_BUCKETNUM
+PREHOOK: Input: default@tst1
+POSTHOOK: query: -- Test changing bucket number of (table/partition)
+
+alter table tst1 into 4 buckets
+POSTHOOK: type: ALTERTABLE_BUCKETNUM
+POSTHOOK: Input: default@tst1
+POSTHOOK: Output: default@tst1
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted tst1
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe formatted tst1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+# col_name	data_type	comment
+
+key	string	None
+value	string	None
+
+# Partition Information
+# col_name	data_type	comment
+
+ds	string	None
+
+# Detailed Table Information
+Database:	default
+#### A masked pattern was here ####
+Protect Mode:	None
+Retention:	0
+#### A masked pattern was here ####
+Table Type:	MANAGED_TABLE
+Table Parameters:
+#### A masked pattern was here ####
+	numFiles	1
+	numPartitions	1
+	numRows	500
+	rawDataSize	5312
+	totalSize	5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:	org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:	No
+Num Buckets:	4
+Bucket Columns:	[key]
+Sort Columns:	[]
+Storage Desc Params:
+	serialization.format	1
+PREHOOK: query: describe formatted tst1 partition (ds = '1')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe formatted tst1 partition (ds = '1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+# col_name	data_type	comment
+
+key	string	None
+value	string	None
+
+# Partition Information
+# col_name	data_type	comment
+
+ds	string	None
+
+# Detailed Partition Information
+Partition Value:	[1]
+Database:	default
+Table:	tst1
+#### A masked pattern was here ####
+Protect Mode:	None
+#### A masked pattern was here ####
+Partition Parameters:
+	numFiles	1
+	numRows	500
+	rawDataSize	5312
+	totalSize	5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:	org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:	No
+Num Buckets:	8
+Bucket Columns:	[key]
+Sort Columns:	[]
+Storage Desc Params:
+	serialization.format	1
+PREHOOK: query: alter table tst1 partition (ds = '1') into 6 buckets
+PREHOOK: type: ALTERPARTITION_BUCKETNUM
+PREHOOK: Input: default@tst1
+POSTHOOK: query: alter table tst1 partition (ds = '1') into 6 buckets
+POSTHOOK: type: ALTERPARTITION_BUCKETNUM
+POSTHOOK: Input: default@tst1
+POSTHOOK: Input: default@tst1@ds=1
+POSTHOOK: Output: default@tst1@ds=1
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted tst1
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe formatted tst1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+# col_name	data_type	comment
+
+key	string	None
+value	string	None
+
+# Partition Information
+# col_name	data_type	comment
+
+ds	string	None
+
+# Detailed Table Information
+Database:	default
+#### A masked pattern was here ####
+Protect Mode:	None
+Retention:	0
+#### A masked pattern was here ####
+Table Type:	MANAGED_TABLE
+Table Parameters:
+#### A masked pattern was here ####
+	numFiles	1
+	numPartitions	1
+	numRows	500
+	rawDataSize	5312
+	totalSize	5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:	org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:	No
+Num Buckets:	4
+Bucket Columns:	[key]
+Sort Columns:	[]
+Storage Desc Params:
+	serialization.format	1
+PREHOOK: query: describe formatted tst1 partition (ds = '1')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: describe formatted tst1 partition (ds = '1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+# col_name	data_type	comment
+
+key	string	None
+value	string	None
+
+# Partition Information
+# col_name	data_type	comment
+
+ds	string	None
+
+# Detailed Partition Information
+Partition Value:	[1]
+Database:	default
+Table:	tst1
+#### A masked pattern was here ####
+Protect Mode:	None
+#### A masked pattern was here ####
+Partition Parameters:
+#### A masked pattern was here ####
+	numFiles	1
+	numRows	500
+	rawDataSize	5312
+	totalSize	5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:	org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:	No
+Num Buckets:	6
+Bucket Columns:	[key]
+Sort Columns:	[]
+Storage Desc Params:
+	serialization.format	1
 PREHOOK: query: -- Test adding sort order
 
 alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets
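For reference, a minimal usage sketch of the syntax this patch enables, mirroring the statements exercised in alter_numbuckets_partitioned_table.q (the table tst1, its bucketing on key, and the ds partition come from that test fixture; any bucketed table would do):

    -- assumes a partitioned table created with CLUSTERED BY (key) INTO 8 BUCKETS
    ALTER TABLE tst1 INTO 4 BUCKETS;                       -- set the bucket count on the table
    ALTER TABLE tst1 PARTITION (ds = '1') INTO 6 BUCKETS;  -- set the bucket count on one partition
    DESCRIBE FORMATTED tst1;                               -- shows Num Buckets: 4
    DESCRIBE FORMATTED tst1 PARTITION (ds = '1');          -- shows Num Buckets: 6

As the DDLTask change shows, the statement only updates numBuckets in the table or partition storage descriptor; it does not re-cluster existing data files.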