diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index cfece77..5a4110c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -2728,6 +2728,21 @@ private int showLocksNewFormat(ShowLocksDesc showLocks, HiveLockManager lm)
   }

   private int showCompactions(Hive db, ShowCompactionsDesc desc) throws HiveException {
+    if(desc.getFilterExpr() != null) {
+      /* Turn the filter into a String that can be passed to the underlying DB...
+       * but it may not be a DB, in which case it may not understand the string,
+       * so it would have to be parsed again.
+       * Actually, ASTNode is serializable.
+       *
+       * Perhaps a different way to do this is to make the syntax
+       * SHOW COMPACTIONS WHERE <expr>, where the <expr> is not parsed by HiveParser
+       * (is that possible?), and pass that as a string into TxnHandler.
+       * Then use something like
+       * ExpressionTree exprTree = PartFilterExprUtil.getFilterParser("year = 2015 and state = 'CA'").tree;
+       * and then find
+       */
+      ;
+    }
     // Call the metastore to get the currently queued and running compactions.
     ShowCompactResponse rsp = db.showCompactions();

diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index c7389a8..1098fe0 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -2484,7 +2484,16 @@ private void analyzeLockTable(ASTNode ast)
    * @throws SemanticException Parsing failed.
    */
   private void analyzeShowCompactions(ASTNode ast) throws SemanticException {
-    ShowCompactionsDesc desc = new ShowCompactionsDesc(ctx.getResFile());
+    ShowCompactionsDesc desc = null;
+    if(ast.getChildCount() == 1) {
+      Tree whereNode = ast.getChild(0);
+      assert whereNode.getType() == HiveParser.TOK_WHERE;
+      assert whereNode.getChildCount() == 1;
+      desc = new ShowCompactionsDesc(ctx.getResFile(), (ASTNode) whereNode.getChild(0));
+    }
+    else {
+      desc = new ShowCompactionsDesc(ctx.getResFile());
+    }
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
     setFetchTask(createFetchTask(desc.getSchema()));
   }
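A minimal sketch of the parsing idea floated in the showCompactions() comment above, assuming the WHERE text reaches the metastore side as a plain string. PartFilterExprUtil, ExpressionTree, and getFilterParser() are the existing metastore partition-filter utilities the comment already names; the wrapper class, method name, and example filter below are illustrative only and not part of this patch.

    import org.apache.hadoop.hive.metastore.PartFilterExprUtil;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.parser.ExpressionTree;

    // Hypothetical helper (not part of the patch): parse a SHOW COMPACTIONS filter string
    // with the metastore's partition-filter grammar, as the comment above suggests.
    public class CompactionFilterSketch {
      static ExpressionTree parseFilter(String filter) throws MetaException {
        // getFilterParser() throws MetaException if the string does not match the grammar.
        return PartFilterExprUtil.getFilterParser(filter).tree;
      }

      public static void main(String[] args) throws MetaException {
        // Example filter copied from the comment in showCompactions().
        ExpressionTree tree = parseFilter("year = 2015 and state = 'CA'");
        // A TxnHandler-side caller would walk the tree to build a SQL predicate,
        // or fall back to filtering the response client-side for non-RDBMS stores.
        System.out.println(tree != null);
      }
    }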
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 55915a6..8b14f5e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -1507,7 +1507,7 @@ showStatement
     )
     | KW_SHOW (showOptions=KW_FORMATTED)? (KW_INDEX|KW_INDEXES) KW_ON showStmtIdentifier ((KW_FROM|KW_IN) db_name=identifier)?
     -> ^(TOK_SHOWINDEXES showStmtIdentifier $showOptions? $db_name?)
-    | KW_SHOW KW_COMPACTIONS -> ^(TOK_SHOW_COMPACTIONS)
+    | KW_SHOW KW_COMPACTIONS whereClause? -> ^(TOK_SHOW_COMPACTIONS whereClause?)
     | KW_SHOW KW_TRANSACTIONS -> ^(TOK_SHOW_TRANSACTIONS)
     | KW_SHOW KW_CONF StringLiteral -> ^(TOK_SHOWCONF StringLiteral)
     ;

diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCompactionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCompactionsDesc.java
index 94fd289..1990bd7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCompactionsDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCompactionsDesc.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.ql.plan;

 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.parse.ASTNode;

 import java.io.Serializable;

@@ -31,13 +32,19 @@
     "starttime#string:string:string:string:string:string:string";

   private String resFile;
+  private ASTNode filterExpr;

   /**
    *
    * @param resFile File that results of show will be written to.
+   * @param filterExpr the "where" clause of SHOW COMPACTIONS
    */
-  public ShowCompactionsDesc(Path resFile) {
+  public ShowCompactionsDesc(Path resFile, ASTNode filterExpr) {
     this.resFile = resFile.toString();
+    this.filterExpr = filterExpr;
+  }
+  public ShowCompactionsDesc(Path resFile) {
+    this(resFile, null);
   }

   /**
@@ -53,4 +60,5 @@ public String getSchema() {
   public String getResFile() {
     return resFile;
   }
+  public ASTNode getFilterExpr() { return filterExpr; }
 }
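Since ShowCompactionsDesc now carries the filter as an ASTNode, the "pass that as a string into TxnHandler" option from the showCompactions() comment could also be served by recovering the verbatim WHERE text from the token stream at analysis time. A rough sketch under that assumption: Context.getTokenRewriteStream() and the ANTLR token indexes already exist in Hive, while the class and method below are hypothetical and not part of this patch.

    import org.antlr.runtime.TokenRewriteStream;
    import org.apache.hadoop.hive.ql.Context;
    import org.apache.hadoop.hive.ql.parse.ASTNode;

    // Hypothetical helper (not part of the patch): return the original text of the
    // WHERE expression subtree so it could be shipped to the metastore as a string.
    public class WhereTextSketch {
      static String getWhereText(Context ctx, ASTNode filterExpr) {
        TokenRewriteStream trs = ctx.getTokenRewriteStream();
        return trs.toString(filterExpr.getTokenStartIndex(), filterExpr.getTokenStopIndex());
      }
    }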
diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index 68af15a..bf314db 100644
--- ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -75,7 +75,8 @@
     ACIDTBL("acidTbl"),
     ACIDTBL2("acidTbl2"),
     NONACIDORCTBL("nonAcidOrcTbl"),
-    NONACIDORCTBL2("nonAcidOrcTbl2");
+    NONACIDORCTBL2("nonAcidOrcTbl2"),
+    ACIDTBLPART("AcidTblPart");

     private final String name;
     @Override
@@ -115,6 +116,7 @@ public void setUp() throws Exception {
     runStatementOnDriver("create table " + Table.NONACIDORCTBL + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='false')");
     runStatementOnDriver("create table " + Table.NONACIDORCTBL2 + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='false')");
     runStatementOnDriver("create temporary table " + Table.ACIDTBL2 + "(a int, b int, c int) clustered by (c) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='true')");
+    runStatementOnDriver("create table " + Table.ACIDTBLPART + "(a int, b int) partitioned by (p string) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='true')");
   }
   private void dropTables() throws Exception {
     for(Table t : Table.values()) {
@@ -615,4 +617,37 @@ public void testSpecialChar() throws Exception {
       "\nwhen matched then update set vc=`∆∋` " +
       "\nwhen not matched then insert values(`a/b`.`g/h`,`a/b`.j,`a/b`.k)");
   }
+  @Test
+  public void testShowCompactions() throws Exception {
+    d.destroy();
+    hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
+    d = new Driver(hiveConf);
+    //generate some compaction history
+    runStatementOnDriver("insert into " + Table.ACIDTBLPART + " PARTITION(p) " +
+      " values(1,2,'p1'),(3,4,'p1'),(1,2,'p2'),(3,4,'p2'),(1,2,'p3'),(3,4,'p3')");
+    runStatementOnDriver("alter table "+ Table.ACIDTBLPART + " PARTITION(p='p1') compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    runStatementOnDriver("alter table "+ Table.ACIDTBLPART + " PARTITION(p='p2') compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    runStatementOnDriver("alter table "+ Table.ACIDTBLPART + " PARTITION(p='p3') compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    runStatementOnDriver("insert into " + Table.ACIDTBLPART + " PARTITION(p) " +
+      " values(4,5,'p1'),(6,7,'p1'),(4,5,'p2'),(6,7,'p2'),(4,5,'p3'),(6,7,'p3')");
+    runStatementOnDriver("alter table "+ Table.ACIDTBLPART + " PARTITION(p='p1') compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    runStatementOnDriver("alter table "+ Table.ACIDTBLPART + " PARTITION(p='p2') compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    runStatementOnDriver("alter table "+ Table.ACIDTBLPART + " PARTITION(p='p3') compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
+
+    TestTxnCommands2.checkCompactionState(
+      new TestTxnCommands2.CompactionsByState(0,0,0,6,/*ready to clean*/0,0,6),
+      TestTxnCommands2.countCompacts(txnHandler));
+
+    List<String> r = runStatementOnDriver("SHOW COMPACTIONS");
+    Assert.assertEquals(7, r.size());//includes header row
+    r = runStatementOnDriver("SHOW COMPACTIONS WHERE part='p=p1'");
+    Assert.assertEquals(3, r.size());//includes header row
+  }
 }

diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index 49ba667..ea1240b 100644
--- ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -865,7 +865,7 @@ public void testInsertOverwriteWithSelfJoin() throws Exception {
     Assert.assertEquals("Insert overwrite partition failed", stringifyValues(updatedData), rs2);
     //insert overwrite not supported for ACID tables
   }
-  private static void checkCompactionState(CompactionsByState expected, CompactionsByState actual) {
+  static void checkCompactionState(CompactionsByState expected, CompactionsByState actual) {
     Assert.assertEquals(TxnStore.ATTEMPTED_RESPONSE, expected.attempted, actual.attempted);
     Assert.assertEquals(TxnStore.FAILED_RESPONSE, expected.failed, actual.failed);
     Assert.assertEquals(TxnStore.INITIATED_RESPONSE, expected.initiated, actual.initiated);
@@ -1004,7 +1004,7 @@ private int getFileSystemCacheSize() throws Exception {
     return 0;
   }

-  private static class CompactionsByState {
+  static class CompactionsByState {
    private int attempted;
    private int failed;
    private int initiated;
@@ -1025,7 +1025,7 @@ private int getFileSystemCacheSize() throws Exception {
      this.total = total;
    }
  }
-  private static CompactionsByState countCompacts(TxnStore txnHandler) throws MetaException {
+  static CompactionsByState countCompacts(TxnStore txnHandler) throws MetaException {
    ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
    CompactionsByState compactionsByState = new CompactionsByState();
    compactionsByState.total = resp.getCompactsSize();
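The new test expects SHOW COMPACTIONS WHERE part='p=p1' to narrow the output to one partition's two compactions (3 rows including the header). Until the metastore can evaluate the predicate itself, one fallback would be to filter the ShowCompactResponse client-side in DDLTask.showCompactions(). A rough sketch, assuming the "part" key maps to ShowCompactResponseElement.getPartitionname(); the filter key names are not defined by this patch and the class below is illustrative only.

    import java.util.Iterator;
    import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
    import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;

    // Hypothetical fallback (not part of the patch): drop compaction entries whose
    // partition name does not match the value requested in the WHERE clause.
    public class CompactionClientFilterSketch {
      static void retainPartition(ShowCompactResponse rsp, String partName) {
        Iterator<ShowCompactResponseElement> it = rsp.getCompacts().iterator();
        while (it.hasNext()) {
          ShowCompactResponseElement e = it.next();
          if (e.getPartitionname() == null || !e.getPartitionname().equals(partName)) {
            it.remove();
          }
        }
      }
    }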