Index: ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
===================================================================
--- ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out (revision 0)
+++ ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out (working copy)
@@ -0,0 +1,200 @@
+PREHOOK: query: create table alter_table_partition_clusterby_sortby (a int, b int) partitioned by (c string) clustered by (a, b) sorted by (a desc, b asc) into 4 buckets
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table alter_table_partition_clusterby_sortby (a int, b int) partitioned by (c string) clustered by (a, b) sorted by (a desc, b asc) into 4 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@alter_table_partition_clusterby_sortby
+PREHOOK: query: alter table alter_table_partition_clusterby_sortby add partition(c='abc')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Input: default@alter_table_partition_clusterby_sortby
+POSTHOOK: query: alter table alter_table_partition_clusterby_sortby add partition(c='abc')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Input: default@alter_table_partition_clusterby_sortby
+POSTHOOK: Output: default@alter_table_partition_clusterby_sortby@c=abc
+PREHOOK: query: -- Turn off sorting for a partition
+
+alter table alter_table_partition_clusterby_sortby partition(c='abc') not sorted
+PREHOOK: type: ALTERTABLE_CLUSTER_SORT
+PREHOOK: Input: default@alter_table_partition_clusterby_sortby
+PREHOOK: Output: default@alter_table_partition_clusterby_sortby@c=abc
+POSTHOOK: query: -- Turn off sorting for a partition
+
+alter table alter_table_partition_clusterby_sortby partition(c='abc') not sorted
+POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
+POSTHOOK: Input: default@alter_table_partition_clusterby_sortby
+POSTHOOK: Input: default@alter_table_partition_clusterby_sortby@c=abc
+POSTHOOK: Output: default@alter_table_partition_clusterby_sortby@c=abc
+PREHOOK: query: desc formatted alter_table_partition_clusterby_sortby partition(c='abc')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: desc formatted alter_table_partition_clusterby_sortby partition(c='abc')
+POSTHOOK: type: DESCTABLE
+# col_name data_type comment
+
+a int None
+b int None
+
+# Partition Information
+# col_name data_type comment
+
+c string None
+
+# Detailed Partition Information
+Partition Value: [abc]
+Database: default
+Table: alter_table_partition_clusterby_sortby
+#### A masked pattern was here ####
+Protect Mode: None
+#### A masked pattern was here ####
+Partition Parameters:
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [a, b]
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: -- Modify clustering for a partition
+
+alter table alter_table_partition_clusterby_sortby partition(c='abc') clustered by (b) sorted by (b desc) into 4 buckets
+PREHOOK: type: ALTERTABLE_CLUSTER_SORT
+PREHOOK: Input: default@alter_table_partition_clusterby_sortby
+PREHOOK: Output: default@alter_table_partition_clusterby_sortby@c=abc
+POSTHOOK: query: -- Modify clustering for a partition
+
+alter table alter_table_partition_clusterby_sortby partition(c='abc') clustered by (b) sorted by (b desc) into 4 buckets
+POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
+POSTHOOK: Input: default@alter_table_partition_clusterby_sortby
+POSTHOOK: Input: default@alter_table_partition_clusterby_sortby@c=abc
+POSTHOOK: Output: default@alter_table_partition_clusterby_sortby@c=abc
+PREHOOK: query: desc formatted alter_table_partition_clusterby_sortby partition(c='abc')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: desc formatted alter_table_partition_clusterby_sortby partition(c='abc')
+POSTHOOK: type: DESCTABLE
+# col_name data_type comment
+
+a int None
+b int None
+
+# Partition Information
+# col_name data_type comment
+
+c string None
+
+# Detailed Partition Information
+Partition Value: [abc]
+Database: default
+Table: alter_table_partition_clusterby_sortby
+#### A masked pattern was here ####
+Protect Mode: None
+#### A masked pattern was here ####
+Partition Parameters:
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [b]
+Sort Columns: [Order(col:b, order:0)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: -- Turn off clustering for a partition
+
+alter table alter_table_partition_clusterby_sortby partition(c='abc') not clustered
+PREHOOK: type: ALTERTABLE_CLUSTER_SORT
+PREHOOK: Input: default@alter_table_partition_clusterby_sortby
+PREHOOK: Output: default@alter_table_partition_clusterby_sortby@c=abc
+POSTHOOK: query: -- Turn off clustering for a partition
+
+alter table alter_table_partition_clusterby_sortby partition(c='abc') not clustered
+POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
+POSTHOOK: Input: default@alter_table_partition_clusterby_sortby
+POSTHOOK: Input: default@alter_table_partition_clusterby_sortby@c=abc
+POSTHOOK: Output: default@alter_table_partition_clusterby_sortby@c=abc
+PREHOOK: query: desc formatted alter_table_partition_clusterby_sortby partition(c='abc')
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: desc formatted alter_table_partition_clusterby_sortby partition(c='abc')
+POSTHOOK: type: DESCTABLE
+# col_name data_type comment
+
+a int None
+b int None
+
+# Partition Information
+# col_name data_type comment
+
+c string None
+
+# Detailed Partition Information
+Partition Value: [abc]
+Database: default
+Table: alter_table_partition_clusterby_sortby
+#### A masked pattern was here ####
+Protect Mode: None
+#### A masked pattern was here ####
+Partition Parameters:
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: -- Table properties should be unchanged
+
+desc formatted alter_table_partition_clusterby_sortby
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: -- Table properties should be unchanged
+
+desc formatted alter_table_partition_clusterby_sortby
+POSTHOOK: type: DESCTABLE
+# col_name data_type comment
+
+a int None
+b int None
+
+# Partition Information
+# col_name data_type comment
+
+c string None
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Protect Mode: None
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ SORTBUCKETCOLSPREFIX TRUE
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [a, b]
+Sort Columns: [Order(col:a, order:0), Order(col:b, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table alter_table_partition_clusterby_sortby
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@alter_table_partition_clusterby_sortby
+PREHOOK: Output: default@alter_table_partition_clusterby_sortby
+POSTHOOK: query: drop table alter_table_partition_clusterby_sortby
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@alter_table_partition_clusterby_sortby
+POSTHOOK: Output: default@alter_table_partition_clusterby_sortby
Index: ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
===================================================================
--- ql/src/test/results/clientpositive/alter_table_not_sorted.q.out (revision 0)
+++ ql/src/test/results/clientpositive/alter_table_not_sorted.q.out (working copy)
@@ -0,0 +1,81 @@
+PREHOOK: query: create table alter_table_not_sorted (a int, b int) clustered by (a) sorted by (a) into 4 buckets
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table alter_table_not_sorted (a int, b int) clustered by (a) sorted by (a) into 4 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@alter_table_not_sorted
+PREHOOK: query: desc formatted alter_table_not_sorted
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: desc formatted alter_table_not_sorted
+POSTHOOK: type: DESCTABLE
+# col_name data_type comment
+
+a int None
+b int None
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Protect Mode: None
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ SORTBUCKETCOLSPREFIX TRUE
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [a]
+Sort Columns: [Order(col:a, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: alter table alter_table_not_sorted not sorted
+PREHOOK: type: ALTERTABLE_CLUSTER_SORT
+PREHOOK: Input: default@alter_table_not_sorted
+PREHOOK: Output: default@alter_table_not_sorted
+POSTHOOK: query: alter table alter_table_not_sorted not sorted
+POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
+POSTHOOK: Input: default@alter_table_not_sorted
+POSTHOOK: Output: default@alter_table_not_sorted
+PREHOOK: query: desc formatted alter_table_not_sorted
+PREHOOK: type: DESCTABLE
+POSTHOOK: query: desc formatted alter_table_not_sorted
+POSTHOOK: type: DESCTABLE
+# col_name data_type comment
+
+a int None
+b int None
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Protect Mode: None
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ SORTBUCKETCOLSPREFIX TRUE
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [a]
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table alter_table_not_sorted
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@alter_table_not_sorted
+PREHOOK: Output: default@alter_table_not_sorted
+POSTHOOK: query: drop table alter_table_not_sorted
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@alter_table_not_sorted
+POSTHOOK: Output: default@alter_table_not_sorted
Index: ql/src/test/queries/clientpositive/alter_table_not_sorted.q
===================================================================
--- ql/src/test/queries/clientpositive/alter_table_not_sorted.q (revision 0)
+++ ql/src/test/queries/clientpositive/alter_table_not_sorted.q (working copy)
@@ -0,0 +1,7 @@
+create table alter_table_not_sorted (a int, b int) clustered by (a) sorted by (a) into 4 buckets;
+desc formatted alter_table_not_sorted;
+
+alter table alter_table_not_sorted not sorted;
+desc formatted alter_table_not_sorted;
+
+drop table alter_table_not_sorted;
Index: ql/src/test/queries/clientpositive/alter_partition_clusterby_sortby.q
===================================================================
--- ql/src/test/queries/clientpositive/alter_partition_clusterby_sortby.q (revision 0)
+++ ql/src/test/queries/clientpositive/alter_partition_clusterby_sortby.q (working copy)
@@ -0,0 +1,23 @@
+create table alter_table_partition_clusterby_sortby (a int, b int) partitioned by (c string) clustered by (a, b) sorted by (a desc, b asc) into 4 buckets;
+alter table alter_table_partition_clusterby_sortby add partition(c='abc');
+
+-- Turn off sorting for a partition
+
+alter table alter_table_partition_clusterby_sortby partition(c='abc') not sorted;
+desc formatted alter_table_partition_clusterby_sortby partition(c='abc');
+
+-- Modify clustering for a partition
+
+alter table alter_table_partition_clusterby_sortby partition(c='abc') clustered by (b) sorted by (b desc) into 4 buckets;
+desc formatted alter_table_partition_clusterby_sortby partition(c='abc');
+
+-- Turn off clustering for a partition
+
+alter table alter_table_partition_clusterby_sortby partition(c='abc') not clustered;
+desc formatted alter_table_partition_clusterby_sortby partition(c='abc');
+
+-- Table properties should be unchanged
+
+desc formatted alter_table_partition_clusterby_sortby;
+
+drop table alter_table_partition_clusterby_sortby;
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 1437106)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (working copy)
@@ -3155,29 +3155,28 @@
       // validate sort columns and bucket columns
       List<String> columns = Utilities.getColumnNamesFromFieldSchema(tbl
           .getCols());
-      Utilities.validateColumnNames(columns, alterTbl.getBucketColumns());
+      if (!alterTbl.isTurnOffSorting()) {
+        Utilities.validateColumnNames(columns, alterTbl.getBucketColumns());
+      }
       if (alterTbl.getSortColumns() != null) {
         Utilities.validateColumnNames(columns, Utilities
             .getColumnNamesFromSortCols(alterTbl.getSortColumns()));
       }
 
-      int numBuckets = -1;
-      ArrayList<String> bucketCols = null;
-      ArrayList<Order> sortCols = null;
+      StorageDescriptor sd = part == null ? tbl.getTTable().getSd() : part.getTPartition().getSd();
 
-      // -1 buckets means to turn off bucketing
-      if (alterTbl.getNumberBuckets() == -1) {
-        bucketCols = new ArrayList<String>();
-        sortCols = new ArrayList<Order>();
-        numBuckets = -1;
+      if (alterTbl.isTurnOffSorting()) {
+        sd.setSortCols(new ArrayList<Order>());
+      } else if (alterTbl.getNumberBuckets() == -1) {
+        // -1 buckets means to turn off bucketing
+        sd.setBucketCols(new ArrayList<String>());
+        sd.setNumBuckets(-1);
+        sd.setSortCols(new ArrayList<Order>());
       } else {
-        bucketCols = alterTbl.getBucketColumns();
-        sortCols = alterTbl.getSortColumns();
-        numBuckets = alterTbl.getNumberBuckets();
+        sd.setBucketCols(alterTbl.getBucketColumns());
+        sd.setNumBuckets(alterTbl.getNumberBuckets());
+        sd.setSortCols(alterTbl.getSortColumns());
       }
-      tbl.getTTable().getSd().setBucketCols(bucketCols);
-      tbl.getTTable().getSd().setNumBuckets(numBuckets);
-      tbl.getTTable().getSd().setSortCols(sortCols);
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) {
       String newLocation = alterTbl.getNewLocation();
       try {
Index: ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java (revision 1437106)
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java (working copy)
@@ -87,6 +87,7 @@
   List<List<String>> skewedColValues;
   Table table;
   boolean isDropIfExists = false;
+  boolean isTurnOffSorting = false;
 
   public AlterTableDesc() {
   }
@@ -180,14 +181,22 @@
   }
 
   public AlterTableDesc(String tableName, int numBuckets,
-      List<String> bucketCols, List<Order> sortCols) {
+      List<String> bucketCols, List<Order> sortCols, HashMap<String, String> partSpec) {
     oldName = tableName;
     op = AlterTableTypes.ADDCLUSTERSORTCOLUMN;
     numberBuckets = numBuckets;
     bucketColumns = new ArrayList<String>(bucketCols);
     sortColumns = new ArrayList<Order>(sortCols);
+    this.partSpec = partSpec;
   }
 
+  public AlterTableDesc(String tableName, boolean sortingOff, HashMap<String, String> partSpec) {
+    oldName = tableName;
+    op = AlterTableTypes.ADDCLUSTERSORTCOLUMN;
+    isTurnOffSorting = sortingOff;
+    this.partSpec = partSpec;
+  }
+
   public AlterTableDesc(String tableName, String newLocation,
       HashMap<String, String> partSpec) {
     op = AlterTableTypes.ALTERLOCATION;
@@ -589,6 +598,13 @@
   }
 
   /**
+   * @return isTurnOffSorting
+   */
+  public boolean isTurnOffSorting() {
+    return isTurnOffSorting;
+  }
+
+  /**
    * @return the turnOffSkewed
    */
   public boolean isTurnOffSkewed() {
Index: ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java (revision 1437106)
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java (working copy)
@@ -48,7 +48,8 @@
   ALTERPARTITION_SERIALIZER("ALTERPARTITION_SERIALIZER", new Privilege[]{Privilege.ALTER_METADATA}, null),
   ALTERTABLE_SERDEPROPERTIES("ALTERTABLE_SERDEPROPERTIES", new Privilege[]{Privilege.ALTER_METADATA}, null),
   ALTERPARTITION_SERDEPROPERTIES("ALTERPARTITION_SERDEPROPERTIES", new Privilege[]{Privilege.ALTER_METADATA}, null),
-  ALTERTABLE_CLUSTER_SORT("ALTERTABLE_CLUSTER_SORT", new Privilege[]{Privilege.ALTER_METADATA}, null),
+  ALTERTABLE_CLUSTER_SORT("ALTERTABLE_CLUSTER_SORT",
+      new Privilege[]{Privilege.ALTER_METADATA}, null),
   ANALYZE_TABLE("ANALYZE_TABLE", null, null),
   ALTERTABLE_BUCKETNUM("ALTERTABLE_BUCKETNUM", new Privilege[]{Privilege.ALTER_METADATA}, null),
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (revision 1437106)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (working copy)
@@ -184,6 +184,8 @@
 TOK_NO_DROP;
 TOK_STORAGEHANDLER;
 TOK_ALTERTABLE_CLUSTER_SORT;
+TOK_NOT_CLUSTERED;
+TOK_NOT_SORTED;
 TOK_TABCOLNAME;
 TOK_TABLELOCATION;
 TOK_PARTITIONLOCATION;
@@ -624,7 +626,6 @@
     | alterStatementSuffixUnArchive
     | alterStatementSuffixProperties
     | alterTblPartitionStatement
-    | alterStatementSuffixClusterbySortby
     | alterStatementSuffixSkewedby
     ;
 
@@ -793,6 +794,7 @@
     | alterStatementSuffixRenamePart
     | alterStatementSuffixBucketNum
     | alterTblPartitionStatementSuffixSkewedLocation
+    | alterStatementSuffixClusterbySortby
     ;
 
 alterStatementSuffixFileFormat
@@ -802,6 +804,14 @@
     -> ^(TOK_ALTERTABLE_FILEFORMAT fileFormat)
     ;
 
+alterStatementSuffixClusterbySortby
+@init {msgs.push("alter partition cluster by sort by statement");}
+@after {msgs.pop();}
+  : KW_NOT KW_CLUSTERED -> ^(TOK_ALTERTABLE_CLUSTER_SORT TOK_NOT_CLUSTERED)
+  | KW_NOT KW_SORTED -> ^(TOK_ALTERTABLE_CLUSTER_SORT TOK_NOT_SORTED)
+  | tableBuckets -> ^(TOK_ALTERTABLE_CLUSTER_SORT tableBuckets)
+  ;
+
 alterTblPartitionStatementSuffixSkewedLocation
 @init {msgs.push("alter partition skewed location");}
 @after {msgs.pop();}
@@ -894,16 +904,6 @@
     -> ^(TOK_TABLEBUCKETS $num)
     ;
 
-alterStatementSuffixClusterbySortby
-@init {msgs.push("alter cluster by sort by statement");}
-@after{msgs.pop();}
-  :name=Identifier tableBuckets
-  ->^(TOK_ALTERTABLE_CLUSTER_SORT $name tableBuckets)
-  |
-  name=Identifier KW_NOT KW_CLUSTERED
-  ->^(TOK_ALTERTABLE_CLUSTER_SORT $name)
-  ;
-
 fileFormat
 @init { msgs.push("file format specification"); }
 @after { msgs.pop(); }
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (revision 1437106)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (working copy)
@@ -58,7 +58,6 @@
     commandType.put(HiveParser.TOK_ALTERTABLE_UNARCHIVE, HiveOperation.ALTERTABLE_UNARCHIVE);
     commandType.put(HiveParser.TOK_ALTERTABLE_PROPERTIES, HiveOperation.ALTERTABLE_PROPERTIES);
     commandType.put(HiveParser.TOK_DROPTABLE_PROPERTIES, HiveOperation.ALTERTABLE_PROPERTIES);
-    commandType.put(HiveParser.TOK_ALTERTABLE_CLUSTER_SORT, HiveOperation.ALTERTABLE_CLUSTER_SORT);
     commandType.put(HiveParser.TOK_SHOWDATABASES, HiveOperation.SHOWDATABASES);
     commandType.put(HiveParser.TOK_SHOWTABLES, HiveOperation.SHOWTABLES);
     commandType.put(HiveParser.TOK_SHOWCOLUMNS, HiveOperation.SHOWCOLUMNS);
@@ -126,6 +125,9 @@
     tablePartitionCommandType.put(HiveParser.TOK_TABLEBUCKETS,
         new HiveOperation[] {HiveOperation.ALTERTABLE_BUCKETNUM,
             HiveOperation.ALTERPARTITION_BUCKETNUM});
+    tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_CLUSTER_SORT,
+        new HiveOperation[] {HiveOperation.ALTERTABLE_CLUSTER_SORT,
+            HiveOperation.ALTERTABLE_CLUSTER_SORT});
   }
 
   public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree)
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (revision 1437106)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (working copy)
@@ -218,6 +218,8 @@
       analyzeAlterTableSkewedLocation(ast, tableName, partSpec);
     } else if (ast.getToken().getType() == HiveParser.TOK_TABLEBUCKETS) {
       analyzeAlterTableBucketNum(ast, tableName, partSpec);
+    } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_CLUSTER_SORT) {
+      analyzeAlterTableClusterSort(ast, tableName, partSpec);
     }
     break;
   }
@@ -334,9 +336,6 @@
     case HiveParser.TOK_DROPTABLE_PROPERTIES:
       analyzeAlterTableProps(ast, false, true);
       break;
-    case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT:
-      analyzeAlterTableClusterSort(ast);
-      break;
     case HiveParser.TOK_ALTERINDEX_REBUILD:
       analyzeAlterIndexRebuild(ast);
       break;
@@ -1388,24 +1387,23 @@
     }
   }
 
-  private void analyzeAlterTableClusterSort(ASTNode ast)
-      throws SemanticException {
-    String tableName = getUnescapedName((ASTNode)ast.getChild(0));
-    Table tab = getTable(tableName, true);
+  private void analyzeAlterTableClusterSort(ASTNode ast, String tableName,
+      HashMap<String, String> partSpec) throws SemanticException {
+    addInputsOutputsAlterTable(tableName, partSpec);
 
-    inputs.add(new ReadEntity(tab));
-    outputs.add(new WriteEntity(tab));
-
-    validateAlterTableType(tab, AlterTableTypes.ADDCLUSTERSORTCOLUMN);
-
-    if (ast.getChildCount() == 1) {
-      // This means that we want to turn off bucketing
-      AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, -1,
-          new ArrayList<String>(), new ArrayList<Order>());
-      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-          alterTblDesc), conf));
-    } else {
-      ASTNode buckets = (ASTNode) ast.getChild(1);
+    AlterTableDesc alterTblDesc;
+    switch (ast.getChild(0).getType()) {
+    case HiveParser.TOK_NOT_CLUSTERED:
+      alterTblDesc = new AlterTableDesc(tableName, -1, new ArrayList<String>(),
+          new ArrayList<Order>(), partSpec);
+      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf));
+      break;
+    case HiveParser.TOK_NOT_SORTED:
+      alterTblDesc = new AlterTableDesc(tableName, true, partSpec);
+      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf));
+      break;
+    case HiveParser.TOK_TABLEBUCKETS:
+      ASTNode buckets = (ASTNode) ast.getChild(0);
       List<String> bucketCols = getColumnNames((ASTNode) buckets.getChild(0));
       List<Order> sortCols = new ArrayList<Order>();
       int numBuckets = -1;
@@ -1419,10 +1417,11 @@
         throw new SemanticException(ErrorMsg.INVALID_BUCKET_NUMBER.getMsg());
       }
 
-      AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, numBuckets,
-          bucketCols, sortCols);
+      alterTblDesc = new AlterTableDesc(tableName, numBuckets,
+          bucketCols, sortCols, partSpec);
       rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
           alterTblDesc), conf));
+      break;
     }
   }