diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 7e78cd5..e185f12 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.HiveObjectRef; import org.apache.hadoop.hive.metastore.api.HiveObjectType; @@ -75,10 +76,13 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; +import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement; import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.TxnInfo; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -142,6 +146,7 @@ import org.apache.hadoop.hive.ql.plan.RevokeDesc; import org.apache.hadoop.hive.ql.plan.RoleDDLDesc; import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc; +import org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc; import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc; import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; @@ -152,6 +157,7 @@ import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc; +import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc; import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc; @@ -330,6 +336,8 @@ public int execute(DriverContext driverContext) { return archive(db, simpleDesc, driverContext); } else if (simpleDesc.getType() == AlterTableTypes.UNARCHIVE) { return unarchive(db, simpleDesc); + } else if (simpleDesc.getType() == AlterTableTypes.COMPACT) { + return compact(db, simpleDesc); } } @@ -383,7 +391,17 @@ public int execute(DriverContext driverContext) { return showLocks(showLocks); } - LockTableDesc lockTbl = work.getLockTblDesc(); + ShowCompactionsDesc compactionsDesc = work.getShowCompactionsDesc(); + if (compactionsDesc != null) { + return showCompactions(compactionsDesc); + } + + ShowTxnsDesc txnsDesc = work.getShowTxnsDesc(); + if (txnsDesc != null) { + return showTxns(txnsDesc); + } + + LockTableDesc lockTbl = work.getLockTblDesc(); if (lockTbl != null) { return lockTable(lockTbl); } @@ -1895,6 +1913,34 @@ private void msckAddPartitionsOneByOne(Hive db, Table table, } } + private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException { + + String dbName = desc.getDbName(); + String tblName = desc.getTableName(); + + Table tbl = db.getTable(dbName, tblName); + + String partName = null; + if (desc.getPartSpec() == null) { + // Compaction can only be done on the 
whole table if the table is non-partitioned. + if (tbl.isPartitioned()) { + throw new HiveException(ErrorMsg.NO_COMPACTION_PARTITION); + } + } else { + Map<String, String> partSpec = desc.getPartSpec(); + List<Partition> partitions = db.getPartitions(tbl, partSpec); + if (partitions.size() > 1) { + throw new HiveException(ErrorMsg.TOO_MANY_COMPACTION_PARTITIONS); + } else if (partitions.size() == 0) { + throw new HiveException(ErrorMsg.INVALID_PARTITION_SPEC); + } + partName = partitions.get(0).getName(); + } + db.compact(tbl.getDbName(), tbl.getTableName(), partName, desc.getCompactionType()); + console.printInfo("Compaction enqueued."); + return 0; + } + /** * MetastoreCheck, see if the data in the metastore matches what is on the * dfs. Current version checks for tables and partitions that are either @@ -2567,7 +2613,6 @@ private int showLocks(ShowLocksDesc showLocks) throws HiveException { locks = lockMgr.getLocks(false, isExt); } else { - // TODO make this work locks = lockMgr.getLocks(getHiveObject(showLocks.getTableName(), showLocks.getPartSpec()), true, isExt); @@ -2719,6 +2764,103 @@ private int showLocksNewFormat(ShowLocksDesc showLocks, HiveLockManager lm) return 0; } + private int showCompactions(ShowCompactionsDesc desc) throws HiveException { + // Call the metastore to get the currently queued and running compactions. + ShowCompactResponse rsp = db.showCompactions(); + + // Write the results into the file + DataOutputStream os = null; + try { + Path resFile = new Path(desc.getResFile()); + FileSystem fs = resFile.getFileSystem(conf); + os = fs.create(resFile); + + // Write a header + os.writeBytes("Database"); + os.write(separator); + os.writeBytes("Table"); + os.write(separator); + os.writeBytes("Partition"); + os.write(separator); + os.writeBytes("Type"); + os.write(separator); + os.writeBytes("State"); + os.write(separator); + os.writeBytes("Worker"); + os.write(separator); + os.writeBytes("Start Time"); + os.write(terminator); + + for (ShowCompactResponseElement e : rsp.getCompacts()) { + os.writeBytes(e.getDbname()); + os.write(separator); + os.writeBytes(e.getTablename()); + os.write(separator); + String part = e.getPartitionname(); + os.writeBytes(part == null ? "NULL" : part); + os.write(separator); + os.writeBytes(e.getType().toString()); + os.write(separator); + os.writeBytes(e.getState()); + os.write(separator); + String wid = e.getWorkerid(); + os.writeBytes(wid == null ? "NULL" : wid); + os.write(separator); + os.writeBytes(Long.toString(e.getStart())); + os.write(terminator); + } + os.close(); + } catch (IOException e) { + LOG.warn("show compactions: " + stringifyException(e)); + return 1; + } finally { + IOUtils.closeStream((FSDataOutputStream)os); + } + return 0; + } + + private int showTxns(ShowTxnsDesc desc) throws HiveException { + // Call the metastore to get the currently open and aborted transactions.
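+ // Each TxnInfo in the response supplies the id, state, user, and hostname written out below; the response's transaction high-water mark is not displayed here.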
+ GetOpenTxnsInfoResponse rsp = db.showTransactions(); + + // Write the results into the file + DataOutputStream os = null; + try { + Path resFile = new Path(desc.getResFile()); + FileSystem fs = resFile.getFileSystem(conf); + os = fs.create(resFile); + + // Write a header + os.writeBytes("Transaction ID"); + os.write(separator); + os.writeBytes("Transaction State"); + os.write(separator); + os.writeBytes("User"); + os.write(separator); + os.writeBytes("Hostname"); + os.write(terminator); + + for (TxnInfo txn : rsp.getOpen_txns()) { + os.writeBytes(Long.toString(txn.getId())); + os.write(separator); + os.writeBytes(txn.getState().toString()); + os.write(separator); + os.writeBytes(txn.getUser()); + os.write(separator); + os.writeBytes(txn.getHostname()); + os.write(terminator); + } + os.close(); + } catch (IOException e) { + LOG.warn("show transactions: " + stringifyException(e)); + return 1; + } finally { + IOUtils.closeStream((FSDataOutputStream)os); + } + return 0; + } + /** * Lock the table/partition specified * diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index ed03fba..ace6cb5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -68,9 +68,11 @@ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.HiveObjectRef; import org.apache.hadoop.hive.metastore.api.HiveObjectType; @@ -84,6 +86,7 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeBag; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -2593,6 +2596,47 @@ public void cancelDelegationToken(String tokenStrForm) } } + /** + * Enqueue a compaction request. + * @param dbname name of the database; if null, the default database is used. + * @param tableName name of the table, cannot be null + * @param partName name of the partition; if null, the whole table is compacted (valid only for + * non-partitioned tables).
+ * @param compactType major or minor + * @throws HiveException + */ + public void compact(String dbname, String tableName, String partName, String compactType) + throws HiveException { + try { + CompactionType cr = null; + if ("major".equals(compactType)) cr = CompactionType.MAJOR; + else if ("minor".equals(compactType)) cr = CompactionType.MINOR; + else throw new RuntimeException("Unknown compaction type " + compactType); + getMSC().compact(dbname, tableName, partName, cr); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); + } + } + + public ShowCompactResponse showCompactions() throws HiveException { + try { + return getMSC().showCompactions(); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); + } + } + + public GetOpenTxnsInfoResponse showTransactions() throws HiveException { + try { + return getMSC().showTxns(); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); + } + } + public static String[] getQualifiedNames(String qualifiedName) { return qualifiedName.split("\\."); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index e642919..56cbcf8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -70,6 +70,9 @@ import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat; import org.apache.hadoop.hive.ql.io.RCFileInputFormat; import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; +import org.apache.hadoop.hive.ql.lockmgr.LockException; +import org.apache.hadoop.hive.ql.lockmgr.TxnManagerFactory; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.HiveUtils; @@ -112,6 +115,7 @@ import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc; import org.apache.hadoop.hive.ql.plan.RoleDDLDesc; import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc; +import org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc; import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc; import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; @@ -122,6 +126,7 @@ import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc; +import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc; import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.plan.TableDesc; @@ -263,6 +268,8 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { analyzeAlterTableBucketNum(ast, tableName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_CLUSTER_SORT) { analyzeAlterTableClusterSort(ast, tableName, partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_COMPACT) { + analyzeAlterTableCompact(ast, tableName, partSpec); } break; } @@ -314,6 +321,14 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { ctx.setResFile(ctx.getLocalTmpPath()); analyzeShowDbLocks(ast); break; + case HiveParser.TOK_SHOW_COMPACTIONS: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowCompactions(ast); + break; + case HiveParser.TOK_SHOW_TRANSACTIONS: + 
ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowTxns(ast); + break; case HiveParser.TOK_DESCFUNCTION: ctx.setResFile(ctx.getLocalTmpPath()); analyzeDescFunction(ast); @@ -1627,6 +1642,24 @@ private void analyzeAlterTableClusterSort(ASTNode ast, String tableName, } } + private void analyzeAlterTableCompact(ASTNode ast, String tableName, + HashMap<String, String> partSpec) throws SemanticException { + + String type = unescapeSQLString(ast.getChild(0).getText()).toLowerCase(); + + if (!type.equals("minor") && !type.equals("major")) { + throw new SemanticException(ErrorMsg.INVALID_COMPACTION_TYPE.getMsg()); + } + + LinkedHashMap<String, String> newPartSpec = null; + if (partSpec != null) newPartSpec = new LinkedHashMap<String, String>(partSpec); + + AlterTableSimpleDesc desc = new AlterTableSimpleDesc(SessionState.get().getCurrentDatabase(), + tableName, newPartSpec, type); + + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf)); + } + static HashMap<String, String> getProps(ASTNode prop) { HashMap<String, String> mapProp = new HashMap<String, String>(); readProps(prop, mapProp); @@ -2218,8 +2251,15 @@ private void analyzeShowLocks(ASTNode ast) throws SemanticException { } } + HiveTxnManager txnManager = null; + try { + txnManager = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); + } catch (LockException e) { + throw new SemanticException(e.getMessage()); + } + ShowLocksDesc showLocksDesc = new ShowLocksDesc(ctx.getResFile(), tableName, - partSpec, isExtended); + partSpec, isExtended, txnManager.useNewShowLocksFormat()); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showLocksDesc), conf)); setFetchTask(createFetchTask(showLocksDesc.getSchema())); @@ -2241,8 +2281,15 @@ private void analyzeShowDbLocks(ASTNode ast) throws SemanticException { boolean isExtended = (ast.getChildCount() > 1); String dbName = stripQuotes(ast.getChild(0).getText()); + HiveTxnManager txnManager = null; + try { + txnManager = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); + } catch (LockException e) { + throw new SemanticException(e.getMessage()); + } + ShowLocksDesc showLocksDesc = new ShowLocksDesc(ctx.getResFile(), dbName, - isExtended); + isExtended, txnManager.useNewShowLocksFormat()); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showLocksDesc), conf)); setFetchTask(createFetchTask(showLocksDesc.getSchema())); @@ -2284,6 +2331,28 @@ private void analyzeLockTable(ASTNode ast) } /** + * Add a task to execute "SHOW COMPACTIONS" + * @param ast The parsed command tree. + * @throws SemanticException Parsing failed. + */ + private void analyzeShowCompactions(ASTNode ast) throws SemanticException { + ShowCompactionsDesc desc = new ShowCompactionsDesc(ctx.getResFile()); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf)); + setFetchTask(createFetchTask(desc.getSchema())); + } + + /** + * Add a task to execute "SHOW TRANSACTIONS" + * @param ast The parsed command tree. + * @throws SemanticException Parsing failed. + */ + private void analyzeShowTxns(ASTNode ast) throws SemanticException { + ShowTxnsDesc desc = new ShowTxnsDesc(ctx.getResFile()); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf)); + setFetchTask(createFetchTask(desc.getSchema())); + } + + /** * Add the task according to the parsed command tree. This is used for the CLI * command "UNLOCK TABLE ..;".
* diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g index b8de58c..3e673ca 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g @@ -291,7 +291,9 @@ KW_EXCHANGE: 'EXCHANGE'; KW_ADMIN: 'ADMIN'; KW_OWNER: 'OWNER'; KW_PRINCIPALS: 'PRINCIPALS'; - +KW_COMPACT: 'COMPACT'; +KW_COMPACTIONS: 'COMPACTIONS'; +KW_TRANSACTIONS: 'TRANSACTIONS'; // Operators // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work. diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index f272cbb..13bbf0a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -328,6 +328,9 @@ TOK_FILE; TOK_JAR; TOK_RESOURCE_URI; TOK_RESOURCE_LIST; +TOK_COMPACT; +TOK_SHOW_COMPACTIONS; +TOK_SHOW_TRANSACTIONS; } @@ -1123,6 +1126,7 @@ alterTblPartitionStatementSuffix | alterStatementSuffixBucketNum | alterTblPartitionStatementSuffixSkewedLocation | alterStatementSuffixClusterbySortby + | alterStatementSuffixCompact ; alterStatementSuffixFileFormat @@ -1239,6 +1243,14 @@ alterStatementSuffixBucketNum -> ^(TOK_TABLEBUCKETS $num) ; +alterStatementSuffixCompact +@init { pushMsg("compaction request", state); } +@after { popMsg(state); } + : KW_COMPACT compactType=StringLiteral + -> ^(TOK_COMPACT $compactType) + ; + + fileFormat @init { pushMsg("file format specification", state); } @after { popMsg(state); } @@ -1309,6 +1321,8 @@ showStatement | KW_SHOW KW_LOCKS KW_DATABASE (dbName=Identifier) (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWDBLOCKS $dbName $isExtended?) | KW_SHOW (showOptions=KW_FORMATTED)? (KW_INDEX|KW_INDEXES) KW_ON showStmtIdentifier ((KW_FROM|KW_IN) db_name=identifier)? -> ^(TOK_SHOWINDEXES showStmtIdentifier $showOptions? $db_name?)
+ | KW_SHOW KW_COMPACTIONS -> ^(TOK_SHOW_COMPACTIONS) + | KW_SHOW KW_TRANSACTIONS -> ^(TOK_SHOW_TRANSACTIONS) ; lockStatement diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g index 81ca600..864e692 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g @@ -538,5 +538,5 @@ functionIdentifier nonReserved : - KW_TRUE | KW_FALSE | KW_LIKE | KW_EXISTS | KW_ASC | KW_DESC | KW_ORDER | KW_GROUP | KW_BY | KW_AS | KW_INSERT | KW_OVERWRITE | KW_OUTER | KW_LEFT | KW_RIGHT | KW_FULL | KW_PARTITION | KW_PARTITIONS | KW_TABLE | KW_TABLES | KW_COLUMNS | KW_INDEX | KW_INDEXES | KW_REBUILD | KW_FUNCTIONS | KW_SHOW | KW_MSCK | KW_REPAIR | KW_DIRECTORY | KW_LOCAL | KW_USING | KW_CLUSTER | KW_DISTRIBUTE | KW_SORT | KW_UNION | KW_LOAD | KW_EXPORT | KW_IMPORT | KW_DATA | KW_INPATH | KW_IS | KW_NULL | KW_CREATE | KW_EXTERNAL | KW_ALTER | KW_CHANGE | KW_FIRST | KW_AFTER | KW_DESCRIBE | KW_DROP | KW_RENAME | KW_IGNORE | KW_PROTECTION | KW_TO | KW_COMMENT | KW_BOOLEAN | KW_TINYINT | KW_SMALLINT | KW_INT | KW_BIGINT | KW_FLOAT | KW_DOUBLE | KW_DATE | KW_DATETIME | KW_TIMESTAMP | KW_DECIMAL | KW_STRING | KW_ARRAY | KW_STRUCT | KW_UNIONTYPE | KW_PARTITIONED | KW_CLUSTERED | KW_SORTED | KW_INTO | KW_BUCKETS | KW_ROW | KW_ROWS | KW_FORMAT | KW_DELIMITED | KW_FIELDS | KW_TERMINATED | KW_ESCAPED | KW_COLLECTION | KW_ITEMS | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_STORED | KW_FILEFORMAT | KW_SEQUENCEFILE | KW_TEXTFILE | KW_RCFILE | KW_ORCFILE | KW_PARQUETFILE | KW_INPUTFORMAT | KW_OUTPUTFORMAT | KW_INPUTDRIVER | KW_OUTPUTDRIVER | KW_OFFLINE | KW_ENABLE | KW_DISABLE | KW_READONLY | KW_NO_DROP | KW_LOCATION | KW_BUCKET | KW_OUT | KW_OF | KW_PERCENT | KW_ADD | KW_REPLACE | KW_RLIKE | KW_REGEXP | KW_TEMPORARY | KW_EXPLAIN | KW_FORMATTED | KW_PRETTY | KW_DEPENDENCY | KW_LOGICAL | KW_SERDE | KW_WITH | KW_DEFERRED | KW_SERDEPROPERTIES | KW_DBPROPERTIES | KW_LIMIT | KW_SET | KW_UNSET | KW_TBLPROPERTIES | KW_IDXPROPERTIES | KW_VALUE_TYPE | KW_ELEM_TYPE | KW_MAPJOIN | KW_STREAMTABLE | KW_HOLD_DDLTIME | KW_CLUSTERSTATUS | KW_UTC | KW_UTCTIMESTAMP | KW_LONG | KW_DELETE | KW_PLUS | KW_MINUS | KW_FETCH | KW_INTERSECT | KW_VIEW | KW_IN | KW_DATABASES | KW_MATERIALIZED | KW_SCHEMA | KW_SCHEMAS | KW_GRANT | KW_REVOKE | KW_SSL | KW_UNDO | KW_LOCK | KW_LOCKS | KW_UNLOCK | KW_SHARED | KW_EXCLUSIVE | KW_PROCEDURE | KW_UNSIGNED | KW_WHILE | KW_READ | KW_READS | KW_PURGE | KW_RANGE | KW_ANALYZE | KW_BEFORE | KW_BETWEEN | KW_BOTH | KW_BINARY | KW_CONTINUE | KW_CURSOR | KW_TRIGGER | KW_RECORDREADER | KW_RECORDWRITER | KW_SEMI | KW_LATERAL | KW_TOUCH | KW_ARCHIVE | KW_UNARCHIVE | KW_COMPUTE | KW_STATISTICS | KW_USE | KW_OPTION | KW_CONCATENATE | KW_SHOW_DATABASE | KW_UPDATE | KW_RESTRICT | KW_CASCADE | KW_SKEWED | KW_ROLLUP | KW_CUBE | KW_DIRECTORIES | KW_FOR | KW_GROUPING | KW_SETS | KW_TRUNCATE | KW_NOSCAN | KW_USER | KW_ROLE | KW_ROLES | KW_INNER | KW_DEFINED | KW_ADMIN | KW_JAR | KW_FILE | KW_OWNER | KW_PRINCIPALS | KW_ALL | KW_DEFAULT | KW_NONE + KW_TRUE | KW_FALSE | KW_LIKE | KW_EXISTS | KW_ASC | KW_DESC | KW_ORDER | KW_GROUP | KW_BY | KW_AS | KW_INSERT | KW_OVERWRITE | KW_OUTER | KW_LEFT | KW_RIGHT | KW_FULL | KW_PARTITION | KW_PARTITIONS | KW_TABLE | KW_TABLES | KW_COLUMNS | KW_INDEX | KW_INDEXES | KW_REBUILD | KW_FUNCTIONS | KW_SHOW | KW_MSCK | KW_REPAIR | KW_DIRECTORY | KW_LOCAL | KW_USING | KW_CLUSTER | KW_DISTRIBUTE | KW_SORT | KW_UNION | KW_LOAD | KW_EXPORT | 
KW_IMPORT | KW_DATA | KW_INPATH | KW_IS | KW_NULL | KW_CREATE | KW_EXTERNAL | KW_ALTER | KW_CHANGE | KW_FIRST | KW_AFTER | KW_DESCRIBE | KW_DROP | KW_RENAME | KW_IGNORE | KW_PROTECTION | KW_TO | KW_COMMENT | KW_BOOLEAN | KW_TINYINT | KW_SMALLINT | KW_INT | KW_BIGINT | KW_FLOAT | KW_DOUBLE | KW_DATE | KW_DATETIME | KW_TIMESTAMP | KW_DECIMAL | KW_STRING | KW_ARRAY | KW_STRUCT | KW_UNIONTYPE | KW_PARTITIONED | KW_CLUSTERED | KW_SORTED | KW_INTO | KW_BUCKETS | KW_ROW | KW_ROWS | KW_FORMAT | KW_DELIMITED | KW_FIELDS | KW_TERMINATED | KW_ESCAPED | KW_COLLECTION | KW_ITEMS | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_STORED | KW_FILEFORMAT | KW_SEQUENCEFILE | KW_TEXTFILE | KW_RCFILE | KW_ORCFILE | KW_PARQUETFILE | KW_INPUTFORMAT | KW_OUTPUTFORMAT | KW_INPUTDRIVER | KW_OUTPUTDRIVER | KW_OFFLINE | KW_ENABLE | KW_DISABLE | KW_READONLY | KW_NO_DROP | KW_LOCATION | KW_BUCKET | KW_OUT | KW_OF | KW_PERCENT | KW_ADD | KW_REPLACE | KW_RLIKE | KW_REGEXP | KW_TEMPORARY | KW_EXPLAIN | KW_FORMATTED | KW_PRETTY | KW_DEPENDENCY | KW_LOGICAL | KW_SERDE | KW_WITH | KW_DEFERRED | KW_SERDEPROPERTIES | KW_DBPROPERTIES | KW_LIMIT | KW_SET | KW_UNSET | KW_TBLPROPERTIES | KW_IDXPROPERTIES | KW_VALUE_TYPE | KW_ELEM_TYPE | KW_MAPJOIN | KW_STREAMTABLE | KW_HOLD_DDLTIME | KW_CLUSTERSTATUS | KW_UTC | KW_UTCTIMESTAMP | KW_LONG | KW_DELETE | KW_PLUS | KW_MINUS | KW_FETCH | KW_INTERSECT | KW_VIEW | KW_IN | KW_DATABASES | KW_MATERIALIZED | KW_SCHEMA | KW_SCHEMAS | KW_GRANT | KW_REVOKE | KW_SSL | KW_UNDO | KW_LOCK | KW_LOCKS | KW_UNLOCK | KW_SHARED | KW_EXCLUSIVE | KW_PROCEDURE | KW_UNSIGNED | KW_WHILE | KW_READ | KW_READS | KW_PURGE | KW_RANGE | KW_ANALYZE | KW_BEFORE | KW_BETWEEN | KW_BOTH | KW_BINARY | KW_CONTINUE | KW_CURSOR | KW_TRIGGER | KW_RECORDREADER | KW_RECORDWRITER | KW_SEMI | KW_LATERAL | KW_TOUCH | KW_ARCHIVE | KW_UNARCHIVE | KW_COMPUTE | KW_STATISTICS | KW_USE | KW_OPTION | KW_CONCATENATE | KW_SHOW_DATABASE | KW_UPDATE | KW_RESTRICT | KW_CASCADE | KW_SKEWED | KW_ROLLUP | KW_CUBE | KW_DIRECTORIES | KW_FOR | KW_GROUPING | KW_SETS | KW_TRUNCATE | KW_NOSCAN | KW_USER | KW_ROLE | KW_ROLES | KW_INNER | KW_DEFINED | KW_ADMIN | KW_JAR | KW_FILE | KW_OWNER | KW_PRINCIPALS | KW_ALL | KW_DEFAULT | KW_NONE | KW_COMPACT | KW_COMPACTIONS | KW_TRANSACTIONS ; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java index 5d16626..e7d0359 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java @@ -106,6 +106,8 @@ commandType.put(HiveParser.TOK_ANALYZE, HiveOperation.ANALYZE_TABLE); commandType.put(HiveParser.TOK_ALTERVIEW_RENAME, HiveOperation.ALTERVIEW_RENAME); commandType.put(HiveParser.TOK_ALTERTABLE_PARTCOLTYPE, HiveOperation.ALTERTABLE_PARTCOLTYPE); + commandType.put(HiveParser.TOK_SHOW_COMPACTIONS, HiveOperation.SHOW_COMPACTIONS); + commandType.put(HiveParser.TOK_SHOW_TRANSACTIONS, HiveOperation.SHOW_TRANSACTIONS); } static { @@ -130,6 +132,8 @@ HiveOperation.ALTERPARTITION_SERDEPROPERTIES }); tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_RENAMEPART, new HiveOperation[] {null, HiveOperation.ALTERTABLE_RENAMEPART}); + tablePartitionCommandType.put(HiveParser.TOK_COMPACT, + new HiveOperation[] {null, HiveOperation.ALTERTABLE_COMPACT}); tablePartitionCommandType.put(HiveParser.TOK_ALTERTBLPART_SKEWED_LOCATION, new HiveOperation[] {HiveOperation.ALTERTBLPART_SKEWED_LOCATION, HiveOperation.ALTERTBLPART_SKEWED_LOCATION }); 
@@ -195,6 +199,8 @@ public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) case HiveParser.TOK_SHOWINDEXES: case HiveParser.TOK_SHOWLOCKS: case HiveParser.TOK_SHOWDBLOCKS: + case HiveParser.TOK_SHOW_COMPACTIONS: + case HiveParser.TOK_SHOW_TRANSACTIONS: case HiveParser.TOK_CREATEINDEX: case HiveParser.TOK_DROPINDEX: case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT: diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java index f6a3b43..20d863b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java @@ -48,7 +48,7 @@ ADDFILEFORMAT, ADDCLUSTERSORTCOLUMN, RENAMECOLUMN, ADDPARTITION, TOUCH, ARCHIVE, UNARCHIVE, ALTERPROTECTMODE, ALTERPARTITIONPROTECTMODE, ALTERLOCATION, DROPPARTITION, RENAMEPARTITION, ADDSKEWEDBY, ALTERSKEWEDLOCATION, - ALTERBUCKETNUM, ALTERPARTITION + ALTERBUCKETNUM, ALTERPARTITION, COMPACT } public static enum ProtectModeType { @@ -702,4 +702,5 @@ public boolean getIsDropIfExists() { return isDropIfExists; } + } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java index 278a2ef..541675c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.plan; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; @@ -30,6 +31,7 @@ private String tableName; private String dbName; private LinkedHashMap<String, String> partSpec; + private String compactionType; AlterTableTypes type; @@ -57,6 +59,22 @@ public AlterTableSimpleDesc(String dbName, String tableName, this.type = type; } + /** + * Constructor for ALTER TABLE ... COMPACT. + * @param dbname name of the database containing the table + * @param tableName name of the table to compact + * @param partSpec partition to compact + * @param compactionType currently supported values: 'major' and 'minor' + */ + public AlterTableSimpleDesc(String dbname, String tableName, + LinkedHashMap<String, String> partSpec, String compactionType) { + type = AlterTableTypes.COMPACT; + this.compactionType = compactionType; + this.dbName = dbname; + this.tableName = tableName; + this.partSpec = partSpec; + } + public String getTableName() { return tableName; } @@ -89,4 +107,12 @@ public void setPartSpec(LinkedHashMap<String, String> partSpec) { this.partSpec = partSpec; } + /** + * Get what type of compaction is being done by an ALTER TABLE ... COMPACT statement. + * @return Compaction type, currently supported values are 'major' and 'minor'.
+ */ + public String getCompactionType() { + return compactionType; + } + } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index 409e0a7..bfe3e86 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -52,6 +52,8 @@ private UnlockTableDesc unlockTblDesc; private ShowFunctionsDesc showFuncsDesc; private ShowLocksDesc showLocksDesc; + private ShowCompactionsDesc showCompactionsDesc; + private ShowTxnsDesc showTxnsDesc; private DescFunctionDesc descFunctionDesc; private ShowPartitionsDesc showPartsDesc; private ShowCreateTableDesc showCreateTblDesc; @@ -323,7 +325,19 @@ this.showLocksDesc = showLocksDesc; } - /** + public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, + ShowCompactionsDesc showCompactionsDesc) { + this(inputs, outputs); + this.showCompactionsDesc = showCompactionsDesc; + } + + public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, + ShowTxnsDesc showTxnsDesc) { + this(inputs, outputs); + this.showTxnsDesc = showTxnsDesc; + } + + /** * @param descFuncDesc */ public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, @@ -711,6 +725,16 @@ public ShowLocksDesc getShowLocksDesc() { return showLocksDesc; } + @Explain(displayName = "Show Compactions Operator") + public ShowCompactionsDesc getShowCompactionsDesc() { + return showCompactionsDesc; + } + + @Explain(displayName = "Show Transactions Operator") + public ShowTxnsDesc getShowTxnsDesc() { + return showTxnsDesc; + } + /** * @return the lockTblDesc */ @@ -751,6 +775,14 @@ public void setShowLocksDesc(ShowLocksDesc showLocksDesc) { this.showLocksDesc = showLocksDesc; } + public void setShowCompactionsDesc(ShowCompactionsDesc showCompactionsDesc) { + this.showCompactionsDesc = showCompactionsDesc; + } + + public void setShowTxnsDesc(ShowTxnsDesc showTxnsDesc) { + this.showTxnsDesc = showTxnsDesc; + } + /** * @param lockTblDesc * the lockTblDesc to set diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java index 9b8a8e1..763f908 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java @@ -111,6 +111,10 @@ new Privilege[] {Privilege.ALTER_DATA}, null), ALTERTABLE_PARTCOLTYPE("ALTERTABLE_PARTCOLTYPE", new Privilege[] { Privilege.SELECT }, new Privilege[] { Privilege.ALTER_DATA }), ALTERVIEW_RENAME("ALTERVIEW_RENAME", new Privilege[] {Privilege.ALTER_METADATA}, null), + ALTERTABLE_COMPACT("ALTERTABLE_COMPACT", new Privilege[]{Privilege.SELECT}, + new Privilege[]{Privilege.ALTER_DATA}), + SHOW_COMPACTIONS("SHOW COMPACTIONS", null, null), + SHOW_TRANSACTIONS("SHOW TRANSACTIONS", null, null), ; private String operationName; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCompactionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCompactionsDesc.java new file mode 100644 index 0000000..94fd289 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCompactionsDesc.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.plan; + +import org.apache.hadoop.fs.Path; + +import java.io.Serializable; + +/** + * Descriptor for showing compactions. + */ +public class ShowCompactionsDesc extends DDLDesc implements Serializable { + + private static final long serialVersionUID = 1L; + private static final String schema = "dbname,tabname,partname,type,state,workerid," + + "starttime#string:string:string:string:string:string:string"; + + private String resFile; + + /** + * + * @param resFile File that results of show will be written to. + */ + public ShowCompactionsDesc(Path resFile) { + this.resFile = resFile.toString(); + } + + /** + * No arg constructor for serialization. + */ + public ShowCompactionsDesc() { + } + + public String getSchema() { + return schema; + } + + public String getResFile() { + return resFile; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java index 3eee8de..1902d36 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java @@ -22,6 +22,7 @@ import java.util.HashMap; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; /** * ShowLocksDesc. @@ -35,6 +36,7 @@ String tableName; HashMap<String, String> partSpec; boolean isExt; + boolean isNewLockFormat; /** * table name for the result of show locks. @@ -45,6 +47,13 @@ */ private static final String schema = "tab_name,mode#string:string"; + /** + * Schema for use with db txn manager.
+ */ + private static final String newFormatSchema = "lockid,database,table,partition,lock_state," + + "lock_type,transaction_id,last_heartbeat,acquired_at,user," + + "hostname#string:string:string:string:string:string:string:string:string:string:string"; + public String getDatabase() { return dbName; } @@ -54,7 +63,8 @@ public String getTable() { } public String getSchema() { - return schema; + if (isNewLockFormat) return newFormatSchema; + else return schema; } public ShowLocksDesc() { @@ -63,23 +73,25 @@ public ShowLocksDesc() { /** * @param resFile */ - public ShowLocksDesc(Path resFile, String dbName, boolean isExt) { + public ShowLocksDesc(Path resFile, String dbName, boolean isExt, boolean isNewFormat) { this.resFile = resFile.toString(); this.partSpec = null; this.tableName = null; this.isExt = isExt; this.dbName = dbName; + isNewLockFormat = isNewFormat; } /** * @param resFile */ public ShowLocksDesc(Path resFile, String tableName, - HashMap<String, String> partSpec, boolean isExt) { + HashMap<String, String> partSpec, boolean isExt, boolean isNewFormat) { this.resFile = resFile.toString(); this.partSpec = partSpec; this.tableName = tableName; this.isExt = isExt; + isNewLockFormat = isNewFormat; } public String getDbName() { @@ -152,4 +164,8 @@ public boolean isExt() { public void setExt(boolean isExt) { this.isExt = isExt; } + + public boolean isNewFormat() { + return isNewLockFormat; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTxnsDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTxnsDesc.java new file mode 100644 index 0000000..c4508d0 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTxnsDesc.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.plan; + +import org.apache.hadoop.fs.Path; + +import java.io.Serializable; + +/** + * Descriptor for showing transactions. + */ +public class ShowTxnsDesc extends DDLDesc implements Serializable { + + private static final long serialVersionUID = 1L; + private static final String schema = "txnid,state,user,host#string:string:string:string"; + + private String resFile; + + /** + * + * @param resFile File that results of show will be written to. + */ + public ShowTxnsDesc(Path resFile) { + this.resFile = resFile.toString(); + } + + /** + * No arg constructor for serialization.
+ */ + public ShowTxnsDesc() { + } + + public String getSchema() { + return schema; + } + + public String getResFile() { + return resFile; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java index 774347d..00f5822 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java @@ -111,6 +111,9 @@ ALTERTABLE_SKEWED, ALTERTBLPART_SKEWED_LOCATION, ALTERVIEW_RENAME, + ALTERTABLE_COMPACT, + SHOW_COMPACTIONS, + SHOW_TRANSACTIONS } diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java index cc0d7d9..2f25df6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java @@ -117,6 +117,7 @@ op2Priv.put(HiveOperationType.ALTERPARTITION_MERGEFILES, new InOutPrivs(OWNER_PRIV_AR, OWNER_PRIV_AR)); op2Priv.put(HiveOperationType.ALTERTABLE_SKEWED, new InOutPrivs(OWNER_PRIV_AR, OWNER_PRIV_AR)); op2Priv.put(HiveOperationType.ALTERTBLPART_SKEWED_LOCATION, new InOutPrivs(OWNER_PRIV_AR, OWNER_INS_SEL_DEL_NOGRANT_AR)); + op2Priv.put(HiveOperationType.ALTERTABLE_COMPACT, new InOutPrivs(OWNER_PRIV_AR, OWNER_PRIV_AR)); op2Priv.put(HiveOperationType.TRUNCATETABLE, new InOutPrivs(OWNER_PRIV_AR, OWNER_PRIV_AR)); //table ownership for create/drop/alter index @@ -173,6 +174,8 @@ op2Priv.put(HiveOperationType.DROPFUNCTION, new InOutPrivs(null, ADMIN_PRIV_AR)); op2Priv.put(HiveOperationType.CREATEMACRO, new InOutPrivs(null, ADMIN_PRIV_AR)); op2Priv.put(HiveOperationType.DROPMACRO, new InOutPrivs(null, ADMIN_PRIV_AR)); + op2Priv.put(HiveOperationType.SHOW_COMPACTIONS, new InOutPrivs(null, null)); + op2Priv.put(HiveOperationType.SHOW_TRANSACTIONS, new InOutPrivs(null, null)); op2Priv.put(HiveOperationType.LOCKTABLE, new InOutPrivs(null, null)); op2Priv.put(HiveOperationType.UNLOCKTABLE, new InOutPrivs(null, null)); diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java new file mode 100644 index 0000000..5f32d5f --- /dev/null +++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java @@ -0,0 +1,129 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hive.ql.parse; + +import junit.framework.Assert; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; +import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; +import org.apache.hadoop.hive.ql.plan.DDLWork; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Tests for parsing and semantic analysis of ALTER TABLE ... compact. + */ +public class TestQBCompact { + static HiveConf conf; + + @BeforeClass + public static void init() throws Exception { + conf = new HiveConf(); + SessionState.start(conf); + + // Create a table so we can work against it + Hive h = Hive.get(conf); + List<String> cols = new ArrayList<String>(); + cols.add("a"); + List<String> partCols = new ArrayList<String>(); + partCols.add("ds"); + h.createTable("foo", cols, partCols, OrcInputFormat.class, OrcOutputFormat.class); + Table t = h.getTable("foo"); + Map<String, String> partSpec = new HashMap<String, String>(); + partSpec.put("ds", "today"); + h.createPartition(t, partSpec); + } + + private AlterTableSimpleDesc parseAndAnalyze(String query) throws Exception { + ParseDriver hd = new ParseDriver(); + ASTNode head = (ASTNode)hd.parse(query).getChild(0); + BaseSemanticAnalyzer a = SemanticAnalyzerFactory.get(conf, head); + a.analyze(head, new Context(conf)); + List<Task<? extends Serializable>> roots = a.getRootTasks(); + Assert.assertEquals(1, roots.size()); + return ((DDLWork)roots.get(0).getWork()).getAlterTblSimpleDesc(); + } + + + @Test + public void testNonPartitionedTable() throws Exception { + AlterTableSimpleDesc desc = parseAndAnalyze("alter table foo compact 'major'"); + Assert.assertEquals("major", desc.getCompactionType()); + Assert.assertEquals("foo", desc.getTableName()); + Assert.assertEquals("default", desc.getDbName()); + } + + @Test + public void testBogusLevel() throws Exception { + boolean sawException = false; + try { + parseAndAnalyze("alter table foo partition(ds = 'today') compact 'bogus'"); + } catch (SemanticException e) { + sawException = true; + Assert.assertEquals(ErrorMsg.INVALID_COMPACTION_TYPE.getMsg(), e.getMessage()); + } + Assert.assertTrue(sawException); + } + + @Test + public void testMajor() throws Exception { + AlterTableSimpleDesc desc = + parseAndAnalyze("alter table foo partition(ds = 'today') compact 'major'"); + Assert.assertEquals("major", desc.getCompactionType()); + Assert.assertEquals("foo", desc.getTableName()); + Assert.assertEquals("default", desc.getDbName()); + HashMap<String, String> parts = desc.getPartSpec(); + Assert.assertEquals(1, parts.size()); + Assert.assertEquals("today", parts.get("ds")); + } + + @Test + public void testMinor() throws Exception { + AlterTableSimpleDesc desc = + parseAndAnalyze("alter table foo partition(ds = 'today') compact 'minor'"); + Assert.assertEquals("minor", desc.getCompactionType()); + Assert.assertEquals("foo", desc.getTableName()); + Assert.assertEquals("default", desc.getDbName()); + HashMap<String, String> parts = desc.getPartSpec(); + Assert.assertEquals(1, parts.size()); + Assert.assertEquals("today",
parts.get("ds")); + } + + @Test + public void showCompactions() throws Exception { + parseAndAnalyze("show compactions"); + } + + @Test + public void showTxns() throws Exception { + parseAndAnalyze("show transactions"); + } +} diff --git ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q new file mode 100644 index 0000000..6612fe8 --- /dev/null +++ ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q @@ -0,0 +1,12 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.txn.testing=true; + +create table T1(key string, val string) stored as textfile; + +set hive.txn.testing=true; +alter table T1 compact 'major'; + +alter table T1 compact 'minor'; + +drop table T1; diff --git ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q new file mode 100644 index 0000000..599cad9 --- /dev/null +++ ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q @@ -0,0 +1,14 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.txn.testing=true; + +create table T1(key string, val string) partitioned by (ds string) stored as textfile; + +alter table T1 add partition (ds = 'today'); +alter table T1 add partition (ds = 'yesterday'); + +alter table T1 partition (ds = 'today') compact 'major'; + +alter table T1 partition (ds = 'yesterday') compact 'minor'; + +drop table T1; diff --git ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q new file mode 100644 index 0000000..871d292 --- /dev/null +++ ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q @@ -0,0 +1,15 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.txn.testing=true; + +create database D1; + +use D1; + +create table T1(key string, val string) stored as textfile; + +alter table T1 compact 'major'; + +alter table T1 compact 'minor'; + +drop table T1; diff --git ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q new file mode 100644 index 0000000..7c71fdd --- /dev/null +++ ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q @@ -0,0 +1,11 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.txn.testing=true; + +show locks; + +show locks extended; + +show locks default; + +show transactions; diff --git ql/src/test/results/clientpositive/dbtxnmgr_compact1.q.out ql/src/test/results/clientpositive/dbtxnmgr_compact1.q.out new file mode 100644 index 0000000..0c9c2b1 --- /dev/null +++ ql/src/test/results/clientpositive/dbtxnmgr_compact1.q.out @@ -0,0 +1,23 @@ +PREHOOK: query: create table T1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: create table T1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T1 +PREHOOK: query: alter table T1 compact 'major' +PREHOOK: type: null +POSTHOOK: query: alter table T1 compact 'major' +POSTHOOK: type: null +PREHOOK: query: alter table T1 compact 'minor' +PREHOOK: type: null +POSTHOOK: query: alter table T1 compact 'minor' +POSTHOOK: type: null +PREHOOK: query: drop table T1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@t1 +PREHOOK: Output: default@t1 +POSTHOOK: query: 
drop table T1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@t1 +POSTHOOK: Output: default@t1 diff --git ql/src/test/results/clientpositive/dbtxnmgr_compact2.q.out ql/src/test/results/clientpositive/dbtxnmgr_compact2.q.out new file mode 100644 index 0000000..f7c7394 --- /dev/null +++ ql/src/test/results/clientpositive/dbtxnmgr_compact2.q.out @@ -0,0 +1,37 @@ +PREHOOK: query: create table T1(key string, val string) partitioned by (ds string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: create table T1(key string, val string) partitioned by (ds string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T1 +PREHOOK: query: alter table T1 add partition (ds = 'today') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@t1 +POSTHOOK: query: alter table T1 add partition (ds = 'today') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1@ds=today +PREHOOK: query: alter table T1 add partition (ds = 'yesterday') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@t1 +POSTHOOK: query: alter table T1 add partition (ds = 'yesterday') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1@ds=yesterday +PREHOOK: query: alter table T1 partition (ds = 'today') compact 'major' +PREHOOK: type: ALTERTABLE_COMPACT +POSTHOOK: query: alter table T1 partition (ds = 'today') compact 'major' +POSTHOOK: type: ALTERTABLE_COMPACT +PREHOOK: query: alter table T1 partition (ds = 'yesterday') compact 'minor' +PREHOOK: type: ALTERTABLE_COMPACT +POSTHOOK: query: alter table T1 partition (ds = 'yesterday') compact 'minor' +POSTHOOK: type: ALTERTABLE_COMPACT +PREHOOK: query: drop table T1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@t1 +PREHOOK: Output: default@t1 +POSTHOOK: query: drop table T1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@t1 +POSTHOOK: Output: default@t1 diff --git ql/src/test/results/clientpositive/dbtxnmgr_compact3.q.out ql/src/test/results/clientpositive/dbtxnmgr_compact3.q.out new file mode 100644 index 0000000..b279dcd --- /dev/null +++ ql/src/test/results/clientpositive/dbtxnmgr_compact3.q.out @@ -0,0 +1,31 @@ +PREHOOK: query: create database D1 +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: create database D1 +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: use D1 +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: use D1 +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: create table T1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:d1 +POSTHOOK: query: create table T1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: D1@T1 +POSTHOOK: Output: database:d1 +PREHOOK: query: alter table T1 compact 'major' +PREHOOK: type: null +POSTHOOK: query: alter table T1 compact 'major' +POSTHOOK: type: null +PREHOOK: query: alter table T1 compact 'minor' +PREHOOK: type: null +POSTHOOK: query: alter table T1 compact 'minor' +POSTHOOK: type: null +PREHOOK: query: drop table T1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: d1@t1 +PREHOOK: Output: d1@t1 +POSTHOOK: query: drop table T1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: d1@t1 +POSTHOOK: Output: d1@t1 diff --git ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out new file mode 100644 index 0000000..d9d2ed6 --- /dev/null +++ 
ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out @@ -0,0 +1,20 @@ +PREHOOK: query: show locks +PREHOOK: type: SHOWLOCKS +POSTHOOK: query: show locks +POSTHOOK: type: SHOWLOCKS +Lock ID Database Table Partition State Type Transaction ID Last Hearbeat Acquired At User Hostname +PREHOOK: query: show locks extended +PREHOOK: type: SHOWLOCKS +POSTHOOK: query: show locks extended +POSTHOOK: type: SHOWLOCKS +Lock ID Database Table Partition State Type Transaction ID Last Hearbeat Acquired At User Hostname +PREHOOK: query: show locks default +PREHOOK: type: SHOWLOCKS +POSTHOOK: query: show locks default +POSTHOOK: type: SHOWLOCKS +Lock ID Database Table Partition State Type Transaction ID Last Hearbeat Acquired At User Hostname +PREHOOK: query: show transactions +PREHOOK: type: SHOW TRANSACTIONS +POSTHOOK: query: show transactions +POSTHOOK: type: SHOW TRANSACTIONS +Transaction ID Transaction State User Hostname
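
For reviewers, a quick usage sketch of the statements this patch enables (all of them are exercised by the .q tests above; the table and partition names are illustrative):

    set hive.support.concurrency=true;
    set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;

    -- enqueue a compaction request; a partitioned table must name exactly one partition
    alter table t1 compact 'major';
    alter table t1 partition (ds = 'today') compact 'minor';

    -- monitor the compaction queue and open transactions
    show compactions;
    show transactions;

ALTER TABLE ... COMPACT only enqueues the request (the console prints "Compaction enqueued."); the actual rewrite happens asynchronously. SHOW LOCKS additionally switches to the 11-column output format whenever the configured transaction manager reports useNewShowLocksFormat().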