diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 92ed55b..2b96455 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -48,11 +48,8 @@ import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FsShell; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; @@ -60,24 +57,7 @@ import org.apache.hadoop.hive.metastore.ProtectMode; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.HiveObjectType; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.SkewedInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -89,71 +69,17 @@ import org.apache.hadoop.hive.ql.io.rcfile.merge.MergeWork; import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateTask; import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateWork; -import org.apache.hadoop.hive.ql.lockmgr.HiveLock; -import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager; -import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode; -import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject; +import org.apache.hadoop.hive.ql.lockmgr.*; import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData; -import org.apache.hadoop.hive.ql.metadata.CheckResult; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker; -import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; -import org.apache.hadoop.hive.ql.metadata.HiveUtils; -import org.apache.hadoop.hive.ql.metadata.InvalidTableException; +import org.apache.hadoop.hive.ql.metadata.*; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatUtils; import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter; import 
org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; -import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; -import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.AlterIndexDesc; -import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc; -import org.apache.hadoop.hive.ql.plan.AlterTableDesc; +import org.apache.hadoop.hive.ql.plan.*; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; -import org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition; -import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; -import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; -import org.apache.hadoop.hive.ql.plan.CreateTableDesc; -import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc; -import org.apache.hadoop.hive.ql.plan.CreateViewDesc; -import org.apache.hadoop.hive.ql.plan.DDLWork; -import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; -import org.apache.hadoop.hive.ql.plan.DescTableDesc; -import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.DropIndexDesc; -import org.apache.hadoop.hive.ql.plan.DropTableDesc; -import org.apache.hadoop.hive.ql.plan.GrantDesc; -import org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL; -import org.apache.hadoop.hive.ql.plan.LockDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.LockTableDesc; -import org.apache.hadoop.hive.ql.plan.MsckDesc; -import org.apache.hadoop.hive.ql.plan.PartitionSpec; -import org.apache.hadoop.hive.ql.plan.PrincipalDesc; -import org.apache.hadoop.hive.ql.plan.PrivilegeDesc; -import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc; -import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc; -import org.apache.hadoop.hive.ql.plan.RevokeDesc; -import org.apache.hadoop.hive.ql.plan.RoleDDLDesc; -import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc; -import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc; -import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; -import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; -import org.apache.hadoop.hive.ql.plan.ShowGrantDesc; -import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc; -import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; -import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; -import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; -import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; -import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc; -import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; -import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc; -import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.security.authorization.Privilege; import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizer; @@ -177,6 +103,15 @@ import org.apache.hadoop.util.ToolRunner; import org.stringtemplate.v4.ST; +import java.io.*; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.*; +import java.util.Map.Entry; + +import static org.apache.commons.lang.StringUtils.join; +import static org.apache.hadoop.util.StringUtils.stringifyException; + /** * DDLTask implementation. 
* @@ -323,6 +258,8 @@ public int execute(DriverContext driverContext) { return archive(db, simpleDesc, driverContext); } else if (simpleDesc.getType() == AlterTableTypes.UNARCHIVE) { return unarchive(db, simpleDesc); + } else if (simpleDesc.getType() == AlterTableTypes.COMPACT) { + return compact(db, simpleDesc); } } @@ -376,7 +313,17 @@ public int execute(DriverContext driverContext) { return showLocks(showLocks); } - LockTableDesc lockTbl = work.getLockTblDesc(); + ShowCompactionsDesc compactionsDesc = work.getShowCompactionsDesc(); + if (compactionsDesc != null) { + return showCompactions(compactionsDesc); + } + + ShowTxnsDesc txnsDesc = work.getShowTxnsDesc(); + if (txnsDesc != null) { + return showTxns(txnsDesc); + } + + LockTableDesc lockTbl = work.getLockTblDesc(); if (lockTbl != null) { return lockTable(lockTbl); } @@ -1012,7 +959,7 @@ private int createIndex(Hive db, CreateIndexDesc crtIndex) throws HiveException MetaStoreUtils.getIndexTableName(SessionState.get().getCurrentDatabase(), crtIndex.getTableName(), crtIndex.getIndexName()); Table indexTable = db.getTable(indexTableName); - work.getOutputs().add(new WriteEntity(indexTable)); + work.getOutputs().add(new WriteEntity(indexTable, WriteEntity.WriteType.DDL)); } return 0; } @@ -1105,7 +1052,7 @@ private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException private int addPartitions(Hive db, AddPartitionDesc addPartitionDesc) throws HiveException { List parts = db.createPartitions(addPartitionDesc); for (Partition part : parts) { - work.getOutputs().add(new WriteEntity(part)); + work.getOutputs().add(new WriteEntity(part, WriteEntity.WriteType.INSERT)); } return 0; } @@ -1131,7 +1078,7 @@ private int renamePartition(Hive db, RenamePartitionDesc renamePartitionDesc) th Partition newPart = db .getPartition(tbl, renamePartitionDesc.getNewPartSpec(), false); work.getInputs().add(new ReadEntity(oldPart)); - work.getOutputs().add(new WriteEntity(newPart)); + work.getOutputs().add(new WriteEntity(newPart, WriteEntity.WriteType.DDL)); return 0; } @@ -1173,7 +1120,7 @@ private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionD } work.getInputs().add(new ReadEntity(tbl)); - work.getOutputs().add(new WriteEntity(tbl)); + work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL)); return 0; } @@ -1202,7 +1149,7 @@ private int touch(Hive db, AlterTableSimpleDesc touchDesc) throw new HiveException("Uable to update table"); } work.getInputs().add(new ReadEntity(tbl)); - work.getOutputs().add(new WriteEntity(tbl)); + work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_METADATA_ONLY)); } else { Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false); if (part == null) { @@ -1214,7 +1161,7 @@ private int touch(Hive db, AlterTableSimpleDesc touchDesc) throw new HiveException(e); } work.getInputs().add(new ReadEntity(part)); - work.getOutputs().add(new WriteEntity(part)); + work.getOutputs().add(new WriteEntity(part, WriteEntity.WriteType.DDL_METADATA_ONLY)); } return 0; } @@ -1796,6 +1743,34 @@ private void checkArchiveProperty(int partSpecLevel, } } + private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException { + + String dbName = desc.getDbName(); + String tblName = desc.getTableName(); + + Table tbl = db.getTable(dbName, tblName); + + String partName = null; + if (desc.getPartSpec() == null) { + // Compaction can only be done on the whole table if the table is non-partitioned. 
+ if (tbl.isPartitioned()) { + throw new HiveException(ErrorMsg.NO_COMPACTION_PARTITION); + } + } else { + Map partSpec = desc.getPartSpec(); + List partitions = db.getPartitions(tbl, partSpec); + if (partitions.size() > 1) { + throw new HiveException(ErrorMsg.TOO_MANY_COMPACTION_PARTITIONS); + } else if (partitions.size() == 0) { + throw new HiveException(ErrorMsg.INVALID_PARTITION_SPEC); + } + partName = partitions.get(0).getName(); + } + db.compact(tbl.getDbName(), tbl.getTableName(), partName, desc.getCompactionType()); + console.printInfo("Compaction enqueued."); + return 0; + } + /** * MetastoreCheck, see if the data in the metastore matches what is on the * dfs. Current version checks for tables and partitions that are either @@ -2436,7 +2411,7 @@ private int showFunctions(ShowFunctionsDesc showFuncs) throws HiveException { */ private int showLocks(ShowLocksDesc showLocks) throws HiveException { Context ctx = driverContext.getCtx(); - HiveLockManager lockMgr = ctx.getHiveLockMgr(); + HiveTxnManager txnManager = ctx.getHiveTxnManager(); + HiveLockManager lockMgr = txnManager.getLockManager(); + + if (txnManager.useNewShowLocksFormat()) return showLocksNewFormat(showLocks, lockMgr); + boolean isExt = showLocks.isExt(); if (lockMgr == null) { throw new HiveException("show Locks LockManager not specified"); } @@ -2451,9 +2430,12 @@ private int showLocks(ShowLocksDesc showLocks) throws HiveException { List locks = null; if (showLocks.getTableName() == null) { + // TODO should be doing security check here. Users should not be + // able to see each other's locks. locks = lockMgr.getLocks(false, isExt); } else { + // TODO make this work locks = lockMgr.getLocks(getHiveObject(showLocks.getTableName(), showLocks.getPartSpec()), true, isExt); @@ -2517,7 +2499,191 @@ public int compare(HiveLock o1, HiveLock o2) { return 0; } - /** + private int showLocksNewFormat(ShowLocksDesc showLocks, HiveLockManager lm) + throws HiveException { + + DbLockManager lockMgr; + if (!(lm instanceof DbLockManager)) { + throw new RuntimeException("New lock format only supported with db lock manager."); + } + lockMgr = (DbLockManager)lm; + + ShowLocksResponse rsp = lockMgr.getLocks(); + + // write the results in the file + DataOutputStream os = null; + try { + Path resFile = new Path(showLocks.getResFile()); + FileSystem fs = resFile.getFileSystem(conf); + os = fs.create(resFile); + + // Write a header + os.writeBytes("Lock ID"); + os.write(separator); + os.writeBytes("Database"); + os.write(separator); + os.writeBytes("Table"); + os.write(separator); + os.writeBytes("Partition"); + os.write(separator); + os.writeBytes("State"); + os.write(separator); + os.writeBytes("Type"); + os.write(separator); + os.writeBytes("Transaction ID"); + os.write(separator); + os.writeBytes("Last Heartbeat"); + os.write(separator); + os.writeBytes("Acquired At"); + os.write(separator); + os.writeBytes("User"); + os.write(separator); + os.writeBytes("Hostname"); + os.write(terminator); + + List locks = rsp.getLocks(); + if (locks != null) { + for (ShowLocksResponseElement lock : locks) { + os.writeBytes(Long.toString(lock.getLockid())); + os.write(separator); + os.writeBytes(lock.getDbname()); + os.write(separator); + os.writeBytes((lock.getTablename() == null) ? "NULL" : lock.getTablename()); + os.write(separator); + os.writeBytes((lock.getPartname() == null) ?
"NULL" : lock.getPartname()); + os.write(separator); + os.writeBytes(lock.getState().toString()); + os.write(separator); + os.writeBytes(lock.getType().toString()); + os.write(separator); + os.writeBytes((lock.getTxnid() == 0) ? "NULL" : Long.toString(lock.getTxnid())); + os.write(separator); + os.writeBytes(Long.toString(lock.getLastheartbeat())); + os.write(separator); + os.writeBytes((lock.getAcquiredat() == 0) ? "NULL" : Long.toString(lock.getAcquiredat())); + os.write(separator); + os.writeBytes(lock.getUser()); + os.write(separator); + os.writeBytes(lock.getHostname()); + os.write(separator); + os.write(terminator); + } + + } + + os.close(); + os = null; + } catch (FileNotFoundException e) { + LOG.warn("show locks: " + stringifyException(e)); + return 1; + } catch (IOException e) { + LOG.warn("show locks: " + stringifyException(e)); + return 1; + } catch (Exception e) { + throw new HiveException(e.toString()); + } finally { + IOUtils.closeStream((FSDataOutputStream) os); + } + return 0; + } + + private int showCompactions(ShowCompactionsDesc desc) throws HiveException { + // Call the metastore to get the currently queued and running compactions. + ShowCompactResponse rsp = db.showCompactions(); + + // Write the results into the file + DataOutputStream os = null; + try { + Path resFile = new Path(desc.getResFile()); + FileSystem fs = resFile.getFileSystem(conf); + os = fs.create(resFile); + + // Write a header + os.writeBytes("Database"); + os.write(separator); + os.writeBytes("Table"); + os.write(separator); + os.writeBytes("Partition"); + os.write(separator); + os.writeBytes("Type"); + os.write(separator); + os.writeBytes("State"); + os.write(separator); + os.writeBytes("Worker"); + os.write(separator); + os.writeBytes("Start Time"); + os.write(terminator); + + for (ShowCompactResponseElement e : rsp.getCompacts()) { + os.writeBytes(e.getDbname()); + os.write(separator); + os.writeBytes(e.getTablename()); + os.write(separator); + String part = e.getPartitionname(); + os.writeBytes(part == null ? "NULL" : part); + os.write(separator); + os.writeBytes(e.getType().toString()); + os.write(separator); + os.writeBytes(e.getState()); + os.write(separator); + String wid = e.getWorkerid(); + os.writeBytes(wid == null ? "NULL" : wid); + os.write(separator); + os.writeBytes(Long.toString(e.getStart())); + os.write(terminator); + } + os.close(); + } catch (IOException e) { + LOG.warn("show compactions: " + stringifyException(e)); + return 1; + } finally { + IOUtils.closeStream((FSDataOutputStream)os); + } + return 0; + } + + private int showTxns(ShowTxnsDesc desc) throws HiveException { + // Call the metastore to get the currently open transactions.
+ GetOpenTxnsInfoResponse rsp = db.showTransactions(); + + // Write the results into the file + DataOutputStream os = null; + try { + Path resFile = new Path(desc.getResFile()); + FileSystem fs = resFile.getFileSystem(conf); + os = fs.create(resFile); + + // Write a header + os.writeBytes("Transaction ID"); + os.write(separator); + os.writeBytes("Transaction State"); + os.write(separator); + os.writeBytes("User"); + os.write(separator); + os.writeBytes("Hostname"); + os.write(terminator); + + for (TxnInfo txn : rsp.getOpen_txns()) { + os.writeBytes(Long.toString(txn.getId())); + os.write(separator); + os.writeBytes(txn.getState().toString()); + os.write(separator); + os.writeBytes(txn.getUser()); + os.write(separator); + os.writeBytes(txn.getHostname()); + os.write(terminator); + } + os.close(); + } catch (IOException e) { + LOG.warn("show transactions: " + stringifyException(e)); + return 1; + } finally { + IOUtils.closeStream((FSDataOutputStream)os); + } + return 0; + } + + /** * Lock the table/partition specified * * @param lockTbl @@ -2528,7 +2694,12 @@ public int compare(HiveLock o1, HiveLock o2) { */ private int lockTable(LockTableDesc lockTbl) throws HiveException { Context ctx = driverContext.getCtx(); - HiveLockManager lockMgr = ctx.getHiveLockMgr(); + HiveTxnManager txnManager = ctx.getHiveTxnManager(); + if (!txnManager.supportsExplicitLock()) { + throw new HiveException(ErrorMsg.LOCK_REQUEST_UNSUPPORTED, + conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER)); + } + HiveLockManager lockMgr = txnManager.getLockManager(); if (lockMgr == null) { throw new HiveException("lock Table LockManager not specified"); } @@ -2577,7 +2748,12 @@ private int lockTable(LockTableDesc lockTbl) throws HiveException { */ private int lockDatabase(LockDatabaseDesc lockDb) throws HiveException { Context ctx = driverContext.getCtx(); - HiveLockManager lockMgr = ctx.getHiveLockMgr(); + HiveTxnManager txnManager = ctx.getHiveTxnManager(); + if (!txnManager.supportsExplicitLock()) { + throw new HiveException(ErrorMsg.LOCK_REQUEST_UNSUPPORTED, + conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER)); + } + HiveLockManager lockMgr = txnManager.getLockManager(); if (lockMgr == null) { throw new HiveException("lock Database LockManager not specified"); } @@ -2613,7 +2789,12 @@ private int lockDatabase(LockDatabaseDesc lockDb) throws HiveException { */ private int unlockDatabase(UnlockDatabaseDesc unlockDb) throws HiveException { Context ctx = driverContext.getCtx(); - HiveLockManager lockMgr = ctx.getHiveLockMgr(); + HiveTxnManager txnManager = ctx.getHiveTxnManager(); + if (!txnManager.supportsExplicitLock()) { + throw new HiveException(ErrorMsg.LOCK_REQUEST_UNSUPPORTED, + conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER)); + } + HiveLockManager lockMgr = txnManager.getLockManager(); if (lockMgr == null) { throw new HiveException("unlock Database LockManager not specified"); } @@ -2671,7 +2852,12 @@ private HiveLockObject getHiveObject(String tabName, */ private int unlockTable(UnlockTableDesc unlockTbl) throws HiveException { Context ctx = driverContext.getCtx(); - HiveLockManager lockMgr = ctx.getHiveLockMgr(); + HiveTxnManager txnManager = ctx.getHiveTxnManager(); + if (!txnManager.supportsExplicitLock()) { + throw new HiveException(ErrorMsg.LOCK_REQUEST_UNSUPPORTED, + conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER)); + } + HiveLockManager lockMgr = txnManager.getLockManager(); if (lockMgr == null) { throw new HiveException("unlock Table LockManager not specified"); } @@ -3386,7 +3572,7 @@ private int alterTable(Hive 
db, AlterTableDesc alterTbl) throws HiveException { } tbl.setNumBuckets(alterTbl.getNumberBuckets()); } - } else { + } else { throw new HiveException(ErrorMsg.UNSUPPORTED_ALTER_TBL_OP, alterTbl.getOp().toString()); } @@ -3423,17 +3609,17 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { // passed if(part != null) { work.getInputs().add(new ReadEntity(part)); - work.getOutputs().add(new WriteEntity(part)); + work.getOutputs().add(new WriteEntity(part, WriteEntity.WriteType.DDL)); } else if (allPartitions != null ){ for (Partition tmpPart: allPartitions) { work.getInputs().add(new ReadEntity(tmpPart)); - work.getOutputs().add(new WriteEntity(tmpPart)); + work.getOutputs().add(new WriteEntity(tmpPart, WriteEntity.WriteType.DDL)); } } else { work.getInputs().add(new ReadEntity(oldTbl)); - work.getOutputs().add(new WriteEntity(tbl)); + work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL)); } return 0; } @@ -3511,7 +3697,7 @@ private int dropTable(Hive db, DropTableDesc dropTbl) // drop the table db.dropTable(dropTbl.getTableName()); if (tbl != null) { - work.getOutputs().add(new WriteEntity(tbl)); + work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL)); } } else { // This is actually an ALTER TABLE DROP PARTITION @@ -3565,7 +3751,7 @@ private int dropTable(Hive db, DropTableDesc dropTbl) for (Partition partition : partsToDelete) { console.printInfo("Dropping the partition " + partition.getName()); db.dropPartition(dropTbl.getTableName(), partition.getValues(), true); - work.getOutputs().add(new WriteEntity(partition)); + work.getOutputs().add(new WriteEntity(partition, WriteEntity.WriteType.DDL)); } } @@ -3846,7 +4032,7 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { // create the table db.createTable(tbl, crtTbl.getIfNotExists()); - work.getOutputs().add(new WriteEntity(tbl)); + work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL)); return 0; } @@ -3954,7 +4140,7 @@ private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws HiveExce // create the table db.createTable(tbl, crtTbl.getIfNotExists()); - work.getOutputs().add(new WriteEntity(tbl)); + work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL)); return 0; } @@ -3990,7 +4176,7 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { } catch (InvalidOperationException e) { throw new HiveException(e); } - work.getOutputs().add(new WriteEntity(oldview)); + work.getOutputs().add(new WriteEntity(oldview, WriteEntity.WriteType.DDL)); } else { // create new view Table tbl = db.newTable(crtView.getViewName()); @@ -4017,7 +4203,7 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { } db.createTable(tbl, crtView.getIfNotExists()); - work.getOutputs().add(new WriteEntity(tbl)); + work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL)); } return 0; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index d32deea..7d305b8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -63,25 +63,7 @@ import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import 
org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.HiveObjectType; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.SkewedInfo; -import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.index.HiveIndexHandler; import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils; @@ -2538,6 +2520,47 @@ public void cancelDelegationToken(String tokenStrForm) } } + /** + * Enqueue a compaction request. + * @param dbname name of the database, if null default will be used. + * @param tableName name of the table, cannot be null + * @param partName name of the partition, if null table will be compacted (valid only for + * non-partitioned tables). + * @param compactType major or minor + * @throws HiveException + */ + public void compact(String dbname, String tableName, String partName, String compactType) + throws HiveException { + try { + CompactionType cr = null; + if ("major".equals(compactType)) cr = CompactionType.MAJOR; + else if ("minor".equals(compactType)) cr = CompactionType.MINOR; + else throw new RuntimeException("Unknown compaction type " + compactType); + getMSC().compact(dbname, tableName, partName, cr); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); + } + } + + public ShowCompactResponse showCompactions() throws HiveException { + try { + return getMSC().showCompactions(); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); + } + } + + public GetOpenTxnsInfoResponse showTransactions() throws HiveException { + try { + return getMSC().showTxns(); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); + } + } + private static String[] getQualifiedNames(String qualifiedName) { return qualifiedName.split("\\."); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g index f83c15d..389f790 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g @@ -284,6 +284,9 @@ KW_ROLES: 'ROLES'; KW_INNER: 'INNER'; KW_EXCHANGE: 'EXCHANGE'; KW_ADMIN: 'ADMIN'; +KW_COMPACT: 'COMPACT'; +KW_COMPACTIONS: 'COMPACTIONS'; +KW_TRANSACTIONS: 'TRANSACTIONS'; // Operators // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work. 
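Before moving on to the parser changes, a hedged sketch (not part of the patch) of how the new client-side methods added to Hive.java above might be called; the database "default", table "t1", and partition name "ds=today" are hypothetical, and the compaction type string must be "major" or "minor" as the patch's compact() method requires.

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
    import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;

    public class CompactionApiSketch {
      public static void main(String[] args) throws HiveException {
        Hive db = Hive.get(new HiveConf());
        // Enqueue a major compaction of one partition; the type string is "major" or "minor".
        db.compact("default", "t1", "ds=today", "major");
        // List compactions currently queued or running, and the currently open transactions.
        ShowCompactResponse compactions = db.showCompactions();
        GetOpenTxnsInfoResponse openTxns = db.showTransactions();
        System.out.println(compactions.getCompacts().size() + " compactions, "
            + openTxns.getOpen_txns().size() + " open transactions");
      }
    }

Note that DDLTask.compact() above resolves the partition via Partition.getName() before calling this API, which is why a partition spec such as (ds = 'today') reaches Hive.compact() as the string form "ds=today".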
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index c15c4b5..4d78f40 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -138,8 +138,9 @@ TOK_ALTERTABLE_RENAMEPART; TOK_ALTERTABLE_REPLACECOLS; TOK_ALTERTABLE_ADDPARTS; TOK_ALTERTABLE_DROPPARTS; -TOK_ALTERTABLE_ALTERPARTS; -TOK_ALTERTABLE_ALTERPARTS_PROTECTMODE; +TOK_ALTERTABLE_PARTCOLTYPE; +TOK_ALTERTABLE_PROTECTMODE; +TOK_ALTERTABLE_MERGEFILES; TOK_ALTERTABLE_TOUCH; TOK_ALTERTABLE_ARCHIVE; TOK_ALTERTABLE_UNARCHIVE; @@ -285,7 +286,6 @@ TOK_DATABASEPROPERTIES; TOK_DATABASELOCATION; TOK_DBPROPLIST; TOK_ALTERDATABASE_PROPERTIES; -TOK_ALTERTABLE_ALTERPARTS_MERGEFILES; TOK_TABNAME; TOK_TABSRC; TOK_RESTRICT; @@ -314,6 +314,9 @@ TOK_SUBQUERY_OP_NOTIN; TOK_SUBQUERY_OP_NOTEXISTS; TOK_DB_TYPE; TOK_TABLE_TYPE; +TOK_COMPACT; +TOK_SHOW_COMPACTIONS; +TOK_SHOW_TRANSACTIONS; } @@ -900,8 +903,16 @@ alterTableStatementSuffix | alterTblPartitionStatement | alterStatementSuffixSkewedby | alterStatementSuffixExchangePartition + | alterStatementPartitionKeyType ; +alterStatementPartitionKeyType +@init {msgs.push("alter partition key type"); } +@after {msgs.pop();} + : identifier KW_PARTITION KW_COLUMN LPAREN columnNameType RPAREN + -> ^(TOK_ALTERTABLE_PARTCOLTYPE identifier columnNameType) + ; + alterViewStatementSuffix @init { msgs.push("alter view statement"); } @after { msgs.pop(); } @@ -1058,8 +1069,6 @@ alterTblPartitionStatement @after {msgs.pop();} : tablePartitionPrefix alterTblPartitionStatementSuffix -> ^(TOK_ALTERTABLE_PARTITION tablePartitionPrefix alterTblPartitionStatementSuffix) - |Identifier KW_PARTITION KW_COLUMN LPAREN columnNameType RPAREN - -> ^(TOK_ALTERTABLE_ALTERPARTS Identifier columnNameType) ; alterTblPartitionStatementSuffix @@ -1074,6 +1083,7 @@ alterTblPartitionStatementSuffix | alterStatementSuffixBucketNum | alterTblPartitionStatementSuffixSkewedLocation | alterStatementSuffixClusterbySortby + | alterStatementSuffixCompact ; alterStatementSuffixFileFormat @@ -1151,7 +1161,7 @@ alterStatementSuffixProtectMode @init { msgs.push("alter partition protect mode statement"); } @after { msgs.pop(); } : alterProtectMode - -> ^(TOK_ALTERTABLE_ALTERPARTS_PROTECTMODE alterProtectMode) + -> ^(TOK_ALTERTABLE_PROTECTMODE alterProtectMode) ; alterStatementSuffixRenamePart @@ -1165,7 +1175,7 @@ alterStatementSuffixMergeFiles @init { msgs.push(""); } @after { msgs.pop(); } : KW_CONCATENATE - -> ^(TOK_ALTERTABLE_ALTERPARTS_MERGEFILES) + -> ^(TOK_ALTERTABLE_MERGEFILES) ; alterProtectMode @@ -1190,6 +1200,14 @@ alterStatementSuffixBucketNum -> ^(TOK_TABLEBUCKETS $num) ; +alterStatementSuffixCompact +@init { msgs.push("compaction request"); } +@after { msgs.pop(); } + : KW_COMPACT compactType=StringLiteral + -> ^(TOK_COMPACT $compactType) + ; + + fileFormat @init { msgs.push("file format specification"); } @after { msgs.pop(); } @@ -1259,6 +1277,8 @@ showStatement | KW_SHOW KW_LOCKS KW_DATABASE (dbName=Identifier) (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWDBLOCKS $dbName $isExtended?) | KW_SHOW (showOptions=KW_FORMATTED)? (KW_INDEX|KW_INDEXES) KW_ON showStmtIdentifier ((KW_FROM|KW_IN) db_name=identifier)? -> ^(TOK_SHOWINDEXES showStmtIdentifier $showOptions? $db_name?) 
+ | KW_SHOW KW_COMPACTIONS -> ^(TOK_SHOW_COMPACTIONS) + | KW_SHOW KW_TRANSACTIONS -> ^(TOK_SHOW_TRANSACTIONS) ; lockStatement diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g index 4147503..7b6909b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g @@ -535,5 +535,5 @@ identifier nonReserved : - KW_TRUE | KW_FALSE | KW_LIKE | KW_EXISTS | KW_ASC | KW_DESC | KW_ORDER | KW_GROUP | KW_BY | KW_AS | KW_INSERT | KW_OVERWRITE | KW_OUTER | KW_LEFT | KW_RIGHT | KW_FULL | KW_PARTITION | KW_PARTITIONS | KW_TABLE | KW_TABLES | KW_COLUMNS | KW_INDEX | KW_INDEXES | KW_REBUILD | KW_FUNCTIONS | KW_SHOW | KW_MSCK | KW_REPAIR | KW_DIRECTORY | KW_LOCAL | KW_USING | KW_CLUSTER | KW_DISTRIBUTE | KW_SORT | KW_UNION | KW_LOAD | KW_EXPORT | KW_IMPORT | KW_DATA | KW_INPATH | KW_IS | KW_NULL | KW_CREATE | KW_EXTERNAL | KW_ALTER | KW_CHANGE | KW_FIRST | KW_AFTER | KW_DESCRIBE | KW_DROP | KW_RENAME | KW_IGNORE | KW_PROTECTION | KW_TO | KW_COMMENT | KW_BOOLEAN | KW_TINYINT | KW_SMALLINT | KW_INT | KW_BIGINT | KW_FLOAT | KW_DOUBLE | KW_DATE | KW_DATETIME | KW_TIMESTAMP | KW_DECIMAL | KW_STRING | KW_ARRAY | KW_STRUCT | KW_UNIONTYPE | KW_PARTITIONED | KW_CLUSTERED | KW_SORTED | KW_INTO | KW_BUCKETS | KW_ROW | KW_ROWS | KW_FORMAT | KW_DELIMITED | KW_FIELDS | KW_TERMINATED | KW_ESCAPED | KW_COLLECTION | KW_ITEMS | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_STORED | KW_FILEFORMAT | KW_SEQUENCEFILE | KW_TEXTFILE | KW_RCFILE | KW_ORCFILE | KW_INPUTFORMAT | KW_OUTPUTFORMAT | KW_INPUTDRIVER | KW_OUTPUTDRIVER | KW_OFFLINE | KW_ENABLE | KW_DISABLE | KW_READONLY | KW_NO_DROP | KW_LOCATION | KW_BUCKET | KW_OUT | KW_OF | KW_PERCENT | KW_ADD | KW_REPLACE | KW_RLIKE | KW_REGEXP | KW_TEMPORARY | KW_EXPLAIN | KW_FORMATTED | KW_PRETTY | KW_DEPENDENCY | KW_LOGICAL | KW_SERDE | KW_WITH | KW_DEFERRED | KW_SERDEPROPERTIES | KW_DBPROPERTIES | KW_LIMIT | KW_SET | KW_UNSET | KW_TBLPROPERTIES | KW_IDXPROPERTIES | KW_VALUE_TYPE | KW_ELEM_TYPE | KW_MAPJOIN | KW_STREAMTABLE | KW_HOLD_DDLTIME | KW_CLUSTERSTATUS | KW_UTC | KW_UTCTIMESTAMP | KW_LONG | KW_DELETE | KW_PLUS | KW_MINUS | KW_FETCH | KW_INTERSECT | KW_VIEW | KW_IN | KW_DATABASES | KW_MATERIALIZED | KW_SCHEMA | KW_SCHEMAS | KW_GRANT | KW_REVOKE | KW_SSL | KW_UNDO | KW_LOCK | KW_LOCKS | KW_UNLOCK | KW_SHARED | KW_EXCLUSIVE | KW_PROCEDURE | KW_UNSIGNED | KW_WHILE | KW_READ | KW_READS | KW_PURGE | KW_RANGE | KW_ANALYZE | KW_BEFORE | KW_BETWEEN | KW_BOTH | KW_BINARY | KW_CONTINUE | KW_CURSOR | KW_TRIGGER | KW_RECORDREADER | KW_RECORDWRITER | KW_SEMI | KW_LATERAL | KW_TOUCH | KW_ARCHIVE | KW_UNARCHIVE | KW_COMPUTE | KW_STATISTICS | KW_USE | KW_OPTION | KW_CONCATENATE | KW_SHOW_DATABASE | KW_UPDATE | KW_RESTRICT | KW_CASCADE | KW_SKEWED | KW_ROLLUP | KW_CUBE | KW_DIRECTORIES | KW_FOR | KW_GROUPING | KW_SETS | KW_TRUNCATE | KW_NOSCAN | KW_USER | KW_ROLE | KW_ROLES | KW_INNER | KW_DEFINED | KW_ADMIN + KW_TRUE | KW_FALSE | KW_LIKE | KW_EXISTS | KW_ASC | KW_DESC | KW_ORDER | KW_GROUP | KW_BY | KW_AS | KW_INSERT | KW_OVERWRITE | KW_OUTER | KW_LEFT | KW_RIGHT | KW_FULL | KW_PARTITION | KW_PARTITIONS | KW_TABLE | KW_TABLES | KW_COLUMNS | KW_INDEX | KW_INDEXES | KW_REBUILD | KW_FUNCTIONS | KW_SHOW | KW_MSCK | KW_REPAIR | KW_DIRECTORY | KW_LOCAL | KW_USING | KW_CLUSTER | KW_DISTRIBUTE | KW_SORT | KW_UNION | KW_LOAD | KW_EXPORT | KW_IMPORT | KW_DATA | KW_INPATH | KW_IS | KW_NULL | KW_CREATE | KW_EXTERNAL | KW_ALTER | KW_CHANGE 
| KW_FIRST | KW_AFTER | KW_DESCRIBE | KW_DROP | KW_RENAME | KW_IGNORE | KW_PROTECTION | KW_TO | KW_COMMENT | KW_BOOLEAN | KW_TINYINT | KW_SMALLINT | KW_INT | KW_BIGINT | KW_FLOAT | KW_DOUBLE | KW_DATE | KW_DATETIME | KW_TIMESTAMP | KW_DECIMAL | KW_STRING | KW_ARRAY | KW_STRUCT | KW_UNIONTYPE | KW_PARTITIONED | KW_CLUSTERED | KW_SORTED | KW_INTO | KW_BUCKETS | KW_ROW | KW_ROWS | KW_FORMAT | KW_DELIMITED | KW_FIELDS | KW_TERMINATED | KW_ESCAPED | KW_COLLECTION | KW_ITEMS | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_STORED | KW_FILEFORMAT | KW_SEQUENCEFILE | KW_TEXTFILE | KW_RCFILE | KW_ORCFILE | KW_INPUTFORMAT | KW_OUTPUTFORMAT | KW_INPUTDRIVER | KW_OUTPUTDRIVER | KW_OFFLINE | KW_ENABLE | KW_DISABLE | KW_READONLY | KW_NO_DROP | KW_LOCATION | KW_BUCKET | KW_OUT | KW_OF | KW_PERCENT | KW_ADD | KW_REPLACE | KW_RLIKE | KW_REGEXP | KW_TEMPORARY | KW_EXPLAIN | KW_FORMATTED | KW_PRETTY | KW_DEPENDENCY | KW_LOGICAL | KW_SERDE | KW_WITH | KW_DEFERRED | KW_SERDEPROPERTIES | KW_DBPROPERTIES | KW_LIMIT | KW_SET | KW_UNSET | KW_TBLPROPERTIES | KW_IDXPROPERTIES | KW_VALUE_TYPE | KW_ELEM_TYPE | KW_MAPJOIN | KW_STREAMTABLE | KW_HOLD_DDLTIME | KW_CLUSTERSTATUS | KW_UTC | KW_UTCTIMESTAMP | KW_LONG | KW_DELETE | KW_PLUS | KW_MINUS | KW_FETCH | KW_INTERSECT | KW_VIEW | KW_IN | KW_DATABASES | KW_MATERIALIZED | KW_SCHEMA | KW_SCHEMAS | KW_GRANT | KW_REVOKE | KW_SSL | KW_UNDO | KW_LOCK | KW_LOCKS | KW_UNLOCK | KW_SHARED | KW_EXCLUSIVE | KW_PROCEDURE | KW_UNSIGNED | KW_WHILE | KW_READ | KW_READS | KW_PURGE | KW_RANGE | KW_ANALYZE | KW_BEFORE | KW_BETWEEN | KW_BOTH | KW_BINARY | KW_CONTINUE | KW_CURSOR | KW_TRIGGER | KW_RECORDREADER | KW_RECORDWRITER | KW_SEMI | KW_LATERAL | KW_TOUCH | KW_ARCHIVE | KW_UNARCHIVE | KW_COMPUTE | KW_STATISTICS | KW_USE | KW_OPTION | KW_CONCATENATE | KW_SHOW_DATABASE | KW_UPDATE | KW_RESTRICT | KW_CASCADE | KW_SKEWED | KW_ROLLUP | KW_CUBE | KW_DIRECTORIES | KW_FOR | KW_GROUPING | KW_SETS | KW_TRUNCATE | KW_NOSCAN | KW_USER | KW_ROLE | KW_ROLES | KW_INNER | KW_DEFINED | KW_ADMIN | KW_COMPACT | KW_COMPACTIONS | KW_TRANSACTIONS ; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java index 835a654..d6011c1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java @@ -102,11 +102,14 @@ commandType.put(HiveParser.TOK_ALTERTABLE_SKEWED, HiveOperation.ALTERTABLE_SKEWED); commandType.put(HiveParser.TOK_ANALYZE, HiveOperation.ANALYZE_TABLE); commandType.put(HiveParser.TOK_ALTERVIEW_RENAME, HiveOperation.ALTERVIEW_RENAME); + commandType.put(HiveParser.TOK_ALTERTABLE_PARTCOLTYPE, HiveOperation.ALTERTABLE_PARTCOLTYPE); + commandType.put(HiveParser.TOK_SHOW_COMPACTIONS, HiveOperation.SHOW_COMPACTIONS); + commandType.put(HiveParser.TOK_SHOW_TRANSACTIONS, HiveOperation.SHOW_TRANSACTIONS); } static { tablePartitionCommandType.put( - HiveParser.TOK_ALTERTABLE_ALTERPARTS_PROTECTMODE, + HiveParser.TOK_ALTERTABLE_PROTECTMODE, new HiveOperation[] { HiveOperation.ALTERTABLE_PROTECTMODE, HiveOperation.ALTERPARTITION_PROTECTMODE }); tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_FILEFORMAT, @@ -115,7 +118,7 @@ tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_LOCATION, new HiveOperation[] { HiveOperation.ALTERTABLE_LOCATION, HiveOperation.ALTERPARTITION_LOCATION }); - tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_ALTERPARTS_MERGEFILES, + 
tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_MERGEFILES, new HiveOperation[] {HiveOperation.ALTERTABLE_MERGEFILES, HiveOperation.ALTERPARTITION_MERGEFILES }); tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_SERIALIZER, @@ -126,6 +129,8 @@ HiveOperation.ALTERPARTITION_SERDEPROPERTIES }); tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_RENAMEPART, new HiveOperation[] {null, HiveOperation.ALTERTABLE_RENAMEPART}); + tablePartitionCommandType.put(HiveParser.TOK_COMPACT, + new HiveOperation[] {null, HiveOperation.ALTERTABLE_COMPACT}); tablePartitionCommandType.put(HiveParser.TOK_ALTERTBLPART_SKEWED_LOCATION, new HiveOperation[] {HiveOperation.ALTERTBLPART_SKEWED_LOCATION, HiveOperation.ALTERTBLPART_SKEWED_LOCATION }); @@ -172,6 +177,7 @@ public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) case HiveParser.TOK_DROPTABLE_PROPERTIES: case HiveParser.TOK_ALTERTABLE_SERIALIZER: case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES: + case HiveParser.TOK_ALTERTABLE_PARTCOLTYPE: case HiveParser.TOK_ALTERINDEX_REBUILD: case HiveParser.TOK_ALTERINDEX_PROPERTIES: case HiveParser.TOK_ALTERVIEW_PROPERTIES: @@ -190,13 +196,14 @@ public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) case HiveParser.TOK_SHOWINDEXES: case HiveParser.TOK_SHOWLOCKS: case HiveParser.TOK_SHOWDBLOCKS: + case HiveParser.TOK_SHOW_COMPACTIONS: + case HiveParser.TOK_SHOW_TRANSACTIONS: case HiveParser.TOK_CREATEINDEX: case HiveParser.TOK_DROPINDEX: case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT: case HiveParser.TOK_ALTERTABLE_TOUCH: case HiveParser.TOK_ALTERTABLE_ARCHIVE: case HiveParser.TOK_ALTERTABLE_UNARCHIVE: - case HiveParser.TOK_ALTERTABLE_ALTERPARTS: case HiveParser.TOK_LOCKTABLE: case HiveParser.TOK_UNLOCKTABLE: case HiveParser.TOK_LOCKDB: diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java index f6a3b43..20d863b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java @@ -48,7 +48,7 @@ ADDFILEFORMAT, ADDCLUSTERSORTCOLUMN, RENAMECOLUMN, ADDPARTITION, TOUCH, ARCHIVE, UNARCHIVE, ALTERPROTECTMODE, ALTERPARTITIONPROTECTMODE, ALTERLOCATION, DROPPARTITION, RENAMEPARTITION, ADDSKEWEDBY, ALTERSKEWEDLOCATION, - ALTERBUCKETNUM, ALTERPARTITION + ALTERBUCKETNUM, ALTERPARTITION, COMPACT } public static enum ProtectModeType { @@ -702,4 +702,5 @@ public boolean getIsDropIfExists() { return isDropIfExists; } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java index 278a2ef..541675c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.plan; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; @@ -30,6 +31,7 @@ private String tableName; private String dbName; private LinkedHashMap partSpec; + private String compactionType; AlterTableTypes type; @@ -57,6 +59,22 @@ public AlterTableSimpleDesc(String dbName, String tableName, this.type = type; } + /** + * Constructor for ALTER TABLE ... COMPACT. 
+ * @param dbname name of the database containing the table + * @param tableName name of the table to compact + * @param partSpec partition to compact + * @param compactionType currently supported values: 'major' and 'minor' + */ + public AlterTableSimpleDesc(String dbname, String tableName, + LinkedHashMap partSpec, String compactionType) { + type = AlterTableTypes.COMPACT; + this.compactionType = compactionType; + this.dbName = dbname; + this.tableName = tableName; + this.partSpec = partSpec; + } + public String getTableName() { return tableName; } @@ -89,4 +107,12 @@ public void setPartSpec(LinkedHashMap partSpec) { this.partSpec = partSpec; } + /** + * Get what type of compaction is being done by a ALTER TABLE ... COMPACT statement. + * @return Compaction type, currently supported values are 'major' and 'minor'. + */ + public String getCompactionType() { + return compactionType; + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index 409e0a7..bfe3e86 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -52,6 +52,8 @@ private UnlockTableDesc unlockTblDesc; private ShowFunctionsDesc showFuncsDesc; private ShowLocksDesc showLocksDesc; + private ShowCompactionsDesc showCompactionsDesc; + private ShowTxnsDesc showTxnsDesc; private DescFunctionDesc descFunctionDesc; private ShowPartitionsDesc showPartsDesc; private ShowCreateTableDesc showCreateTblDesc; @@ -323,7 +325,19 @@ public DDLWork(HashSet inputs, HashSet outputs, this.showLocksDesc = showLocksDesc; } - /** + public DDLWork(HashSet inputs, HashSet outputs, + ShowCompactionsDesc showCompactionsDesc) { + this(inputs, outputs); + this.showCompactionsDesc = showCompactionsDesc; + } + + public DDLWork(HashSet inputs, HashSet outputs, + ShowTxnsDesc showTxnsDesc) { + this(inputs, outputs); + this.showTxnsDesc = showTxnsDesc; + } + + /** * @param descFuncDesc */ public DDLWork(HashSet inputs, HashSet outputs, @@ -711,6 +725,16 @@ public ShowLocksDesc getShowLocksDesc() { return showLocksDesc; } + @Explain(displayName = "Show Compactions Operator") + public ShowCompactionsDesc getShowCompactionsDesc() { + return showCompactionsDesc; + } + + @Explain(displayName = "Show Transactions Operator") + public ShowTxnsDesc getShowTxnsDesc() { + return showTxnsDesc; + } + /** * @return the lockTblDesc */ @@ -751,6 +775,14 @@ public void setShowLocksDesc(ShowLocksDesc showLocksDesc) { this.showLocksDesc = showLocksDesc; } + public void setShowCompactionsDesc(ShowCompactionsDesc showCompactionsDesc) { + this.showCompactionsDesc = showCompactionsDesc; + } + + public void setShowTxnsDesc(ShowTxnsDesc showTxnsDesc) { + this.showTxnsDesc = showTxnsDesc; + } + /** * @param lockTblDesc * the lockTblDesc to set diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java index fe88a50..7e4d692 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java @@ -106,7 +106,12 @@ ALTERTABLE_SKEWED("ALTERTABLE_SKEWED", new Privilege[] {Privilege.ALTER_METADATA}, null), ALTERTBLPART_SKEWED_LOCATION("ALTERTBLPART_SKEWED_LOCATION", new Privilege[] {Privilege.ALTER_DATA}, null), + ALTERTABLE_PARTCOLTYPE("ALTERTABLE_PARTCOLTYPE", new Privilege[] { Privilege.SELECT }, new Privilege[] { Privilege.ALTER_DATA }), ALTERVIEW_RENAME("ALTERVIEW_RENAME", new 
Privilege[] {Privilege.ALTER_METADATA}, null), + ALTERTABLE_COMPACT("ALTERTABLE_COMPACT", new Privilege[]{Privilege.SELECT}, + new Privilege[]{Privilege.ALTER_DATA}), + SHOW_COMPACTIONS("SHOW COMPACTIONS", null, null), + SHOW_TRANSACTIONS("SHOW TRANSACTIONS", null, null); ; private String operationName; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCompactionsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCompactionsDesc.java new file mode 100644 index 0000000..94fd289 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCompactionsDesc.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.plan; + +import org.apache.hadoop.fs.Path; + +import java.io.Serializable; + +/** + * Descriptor for showing compactions. + */ +public class ShowCompactionsDesc extends DDLDesc implements Serializable { + + private static final long serialVersionUID = 1L; + private static final String schema = "dbname,tabname,partname,type,state,workerid," + + "starttime#string:string:string:string:string:string:string"; + + private String resFile; + + /** + * + * @param resFile File that results of show will be written to. + */ + public ShowCompactionsDesc(Path resFile) { + this.resFile = resFile.toString(); + } + + /** + * No arg constructor for serialization. + */ + public ShowCompactionsDesc() { + } + + public String getSchema() { + return schema; + } + + public String getResFile() { + return resFile; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java index 3eee8de..1902d36 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowLocksDesc.java @@ -22,6 +22,7 @@ import java.util.HashMap; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; /** * ShowLocksDesc. @@ -35,6 +36,7 @@ String tableName; HashMap partSpec; boolean isExt; + boolean isNewLockFormat; /** * table name for the result of show locks. @@ -45,6 +47,13 @@ */ private static final String schema = "tab_name,mode#string:string"; + /** + * Schema for use with db txn manager. 
+ */ + private static final String newFormatSchema = "lockid,database,table,partition,lock_state," + + "lock_type,transaction_id,last_heartbeat,acquired_at,user," + + "hostname#string:string:string:string:string:string:string:string:string:string:string"; + public String getDatabase() { return dbName; } @@ -54,7 +63,8 @@ public String getTable() { } public String getSchema() { - return schema; + if (isNewLockFormat) return newFormatSchema; + else return schema; } public ShowLocksDesc() { @@ -63,23 +73,25 @@ public ShowLocksDesc() { /** * @param resFile */ - public ShowLocksDesc(Path resFile, String dbName, boolean isExt) { + public ShowLocksDesc(Path resFile, String dbName, boolean isExt, boolean isNewFormat) { this.resFile = resFile.toString(); this.partSpec = null; this.tableName = null; this.isExt = isExt; this.dbName = dbName; + isNewLockFormat = isNewFormat; } /** * @param resFile */ public ShowLocksDesc(Path resFile, String tableName, - HashMap partSpec, boolean isExt) { + HashMap partSpec, boolean isExt, boolean isNewFormat) { this.resFile = resFile.toString(); this.partSpec = partSpec; this.tableName = tableName; this.isExt = isExt; + isNewLockFormat = isNewFormat; } public String getDbName() { @@ -152,4 +164,8 @@ public boolean isExt() { public void setExt(boolean isExt) { this.isExt = isExt; } + + public boolean isNewFormat() { + return isNewLockFormat; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTxnsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTxnsDesc.java new file mode 100644 index 0000000..c4508d0 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTxnsDesc.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.plan; + +import org.apache.hadoop.fs.Path; + +import java.io.Serializable; + +/** + * Descriptor for showing transactions. + */ +public class ShowTxnsDesc extends DDLDesc implements Serializable { + + private static final long serialVersionUID = 1L; + private static final String schema = "txnid,state,user,host#string:string:string:string"; + + private String resFile; + + /** + * + * @param resFile File that results of show will be written to. + */ + public ShowTxnsDesc(Path resFile) { + this.resFile = resFile.toString(); + } + + /** + * No arg constructor for serialization. 
+ */ + public ShowTxnsDesc() { + } + + public String getSchema() { + return schema; + } + + public String getResFile() { + return resFile; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java index e20b183..d765af6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java @@ -49,6 +49,7 @@ ALTERTABLE_UNARCHIVE, ALTERTABLE_PROPERTIES, ALTERTABLE_SERIALIZER, + ALTERTABLE_PARTCOLTYPE, ALTERPARTITION_SERIALIZER, ALTERTABLE_SERDEPROPERTIES, ALTERPARTITION_SERDEPROPERTIES, @@ -106,5 +107,7 @@ ALTERTABLE_SKEWED, ALTERTBLPART_SKEWED_LOCATION, ALTERVIEW_RENAME, + ALTERTABLE_COMPACT, + SHOW_COMPACTIONS, } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java new file mode 100644 index 0000000..5f32d5f --- /dev/null +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java @@ -0,0 +1,131 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse; + +import junit.framework.Assert; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; +import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; +import org.apache.hadoop.hive.ql.plan.DDLWork; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Tests for parsing and semantic analysis of ALTER TABLE ... compact. 
+ */ +public class TestQBCompact { + static HiveConf conf; + + @BeforeClass + public static void init() throws Exception { + conf = new HiveConf(); + SessionState.start(conf); + + // Create a table so we can work against it + Hive h = Hive.get(conf); + List cols = new ArrayList(); + cols.add("a"); + List partCols = new ArrayList(); + partCols.add("ds"); + h.createTable("foo", cols, partCols, OrcInputFormat.class, OrcOutputFormat.class); + Table t = h.getTable("foo"); + Map partSpec = new HashMap(); + partSpec.put("ds", "today"); + h.createPartition(t, partSpec); + } + + private AlterTableSimpleDesc parseAndAnalyze(String query) throws Exception { + ParseDriver hd = new ParseDriver(); + ASTNode head = (ASTNode)hd.parse(query).getChild(0); + System.out.println("HERE " + head.dump()); + BaseSemanticAnalyzer a = SemanticAnalyzerFactory.get(conf, head); + a.analyze(head, new Context(conf)); + List> roots = a.getRootTasks(); + Assert.assertEquals(1, roots.size()); + return ((DDLWork)roots.get(0).getWork()).getAlterTblSimpleDesc(); + } + + + @Test + public void testNonPartitionedTable() throws Exception { + boolean sawException = false; + AlterTableSimpleDesc desc = parseAndAnalyze("alter table foo compact 'major'"); + Assert.assertEquals("major", desc.getCompactionType()); + Assert.assertEquals("foo", desc.getTableName()); + Assert.assertEquals("default", desc.getDbName()); + } + + @Test + public void testBogusLevel() throws Exception { + boolean sawException = false; + try { + parseAndAnalyze("alter table foo partition(ds = 'today') compact 'bogus'"); + } catch (SemanticException e) { + sawException = true; + Assert.assertEquals(ErrorMsg.INVALID_COMPACTION_TYPE.getMsg(), e.getMessage()); + } + Assert.assertTrue(sawException); + } + + @Test + public void testMajor() throws Exception { + AlterTableSimpleDesc desc = + parseAndAnalyze("alter table foo partition(ds = 'today') compact 'major'"); + Assert.assertEquals("major", desc.getCompactionType()); + Assert.assertEquals("foo", desc.getTableName()); + Assert.assertEquals("default", desc.getDbName()); + HashMap parts = desc.getPartSpec(); + Assert.assertEquals(1, parts.size()); + Assert.assertEquals("today", parts.get("ds")); + } + + @Test + public void testMinor() throws Exception { + AlterTableSimpleDesc desc = + parseAndAnalyze("alter table foo partition(ds = 'today') compact 'minor'"); + Assert.assertEquals("minor", desc.getCompactionType()); + Assert.assertEquals("foo", desc.getTableName()); + Assert.assertEquals("default", desc.getDbName()); + HashMap parts = desc.getPartSpec(); + Assert.assertEquals(1, parts.size()); + Assert.assertEquals("today", parts.get("ds")); + } + + @Test + public void showCompactions() throws Exception { + parseAndAnalyze("show compactions"); + } + + @Test + public void showTxns() throws Exception { + parseAndAnalyze("show transactions"); + } +} diff --git a/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q b/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q new file mode 100644 index 0000000..6612fe8 --- /dev/null +++ b/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q @@ -0,0 +1,12 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.txn.testing=true; + +create table T1(key string, val string) stored as textfile; + +set hive.txn.testing=true; +alter table T1 compact 'major'; + +alter table T1 compact 'minor'; + +drop table T1; diff --git a/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q 
b/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q new file mode 100644 index 0000000..599cad9 --- /dev/null +++ b/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q @@ -0,0 +1,14 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.txn.testing=true; + +create table T1(key string, val string) partitioned by (ds string) stored as textfile; + +alter table T1 add partition (ds = 'today'); +alter table T1 add partition (ds = 'yesterday'); + +alter table T1 partition (ds = 'today') compact 'major'; + +alter table T1 partition (ds = 'yesterday') compact 'minor'; + +drop table T1; diff --git a/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q b/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q new file mode 100644 index 0000000..871d292 --- /dev/null +++ b/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q @@ -0,0 +1,15 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.txn.testing=true; + +create database D1; + +use D1; + +create table T1(key string, val string) stored as textfile; + +alter table T1 compact 'major'; + +alter table T1 compact 'minor'; + +drop table T1; diff --git a/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q b/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q new file mode 100644 index 0000000..7c71fdd --- /dev/null +++ b/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q @@ -0,0 +1,11 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.txn.testing=true; + +show locks; + +show locks extended; + +show locks default; + +show transactions; diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_compact1.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_compact1.q.out new file mode 100644 index 0000000..edee626 --- /dev/null +++ b/ql/src/test/results/clientpositive/dbtxnmgr_compact1.q.out @@ -0,0 +1,21 @@ +PREHOOK: query: create table T1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table T1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@T1 +PREHOOK: query: alter table T1 compact 'major' +PREHOOK: type: null +POSTHOOK: query: alter table T1 compact 'major' +POSTHOOK: type: null +PREHOOK: query: alter table T1 compact 'minor' +PREHOOK: type: null +POSTHOOK: query: alter table T1 compact 'minor' +POSTHOOK: type: null +PREHOOK: query: drop table T1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@t1 +PREHOOK: Output: default@t1 +POSTHOOK: query: drop table T1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@t1 +POSTHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_compact2.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_compact2.q.out new file mode 100644 index 0000000..a29c2c8 --- /dev/null +++ b/ql/src/test/results/clientpositive/dbtxnmgr_compact2.q.out @@ -0,0 +1,35 @@ +PREHOOK: query: create table T1(key string, val string) partitioned by (ds string) stored as textfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table T1(key string, val string) partitioned by (ds string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@T1 +PREHOOK: query: alter table T1 add partition (ds = 'today') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@t1 +POSTHOOK: query: alter table T1 add partition (ds = 'today') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: 
default@t1 +POSTHOOK: Output: default@t1@ds=today +PREHOOK: query: alter table T1 add partition (ds = 'yesterday') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@t1 +POSTHOOK: query: alter table T1 add partition (ds = 'yesterday') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: default@t1 +POSTHOOK: Output: default@t1@ds=yesterday +PREHOOK: query: alter table T1 partition (ds = 'today') compact 'major' +PREHOOK: type: ALTERTABLE_COMPACT +POSTHOOK: query: alter table T1 partition (ds = 'today') compact 'major' +POSTHOOK: type: ALTERTABLE_COMPACT +PREHOOK: query: alter table T1 partition (ds = 'yesterday') compact 'minor' +PREHOOK: type: ALTERTABLE_COMPACT +POSTHOOK: query: alter table T1 partition (ds = 'yesterday') compact 'minor' +POSTHOOK: type: ALTERTABLE_COMPACT +PREHOOK: query: drop table T1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@t1 +PREHOOK: Output: default@t1 +POSTHOOK: query: drop table T1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@t1 +POSTHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_compact3.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_compact3.q.out new file mode 100644 index 0000000..e087dff --- /dev/null +++ b/ql/src/test/results/clientpositive/dbtxnmgr_compact3.q.out @@ -0,0 +1,29 @@ +PREHOOK: query: create database D1 +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: create database D1 +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: use D1 +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: use D1 +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: create table T1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table T1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: D1@T1 +PREHOOK: query: alter table T1 compact 'major' +PREHOOK: type: null +POSTHOOK: query: alter table T1 compact 'major' +POSTHOOK: type: null +PREHOOK: query: alter table T1 compact 'minor' +PREHOOK: type: null +POSTHOOK: query: alter table T1 compact 'minor' +POSTHOOK: type: null +PREHOOK: query: drop table T1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: d1@t1 +PREHOOK: Output: d1@t1 +POSTHOOK: query: drop table T1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: d1@t1 +POSTHOOK: Output: d1@t1 diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out new file mode 100644 index 0000000..d9d2ed6 --- /dev/null +++ b/ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out @@ -0,0 +1,20 @@ +PREHOOK: query: show locks +PREHOOK: type: SHOWLOCKS +POSTHOOK: query: show locks +POSTHOOK: type: SHOWLOCKS +Lock ID Database Table Partition State Type Transaction ID Last Heartbeat Acquired At User Hostname +PREHOOK: query: show locks extended +PREHOOK: type: SHOWLOCKS +POSTHOOK: query: show locks extended +POSTHOOK: type: SHOWLOCKS +Lock ID Database Table Partition State Type Transaction ID Last Heartbeat Acquired At User Hostname +PREHOOK: query: show locks default +PREHOOK: type: SHOWLOCKS +POSTHOOK: query: show locks default +POSTHOOK: type: SHOWLOCKS +Lock ID Database Table Partition State Type Transaction ID Last Heartbeat Acquired At User Hostname +PREHOOK: query: show transactions +PREHOOK: type: SHOW TRANSACTIONS +POSTHOOK: query: show transactions +POSTHOOK: type: SHOW TRANSACTIONS +Transaction ID Transaction State User Hostname
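For completeness, a hedged sketch of driving the same new statements programmatically, mirroring the dbtxnmgr_* client-positive tests above; the table T1 and partition ds = 'today' come from those tests, and the set of configuration values shown is an assumption based on the test setup rather than a documented minimum.

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.Driver;
    import org.apache.hadoop.hive.ql.session.SessionState;

    public class DbTxnMgrUsageSketch {
      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        // Mirror the settings used by the dbtxnmgr_* tests (assumed sufficient for this sketch).
        conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
        conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER,
            "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
        conf.set("hive.txn.testing", "true");
        SessionState.start(conf);
        Driver driver = new Driver(conf);
        // The new statements introduced by this patch, as exercised by the tests above.
        driver.run("alter table T1 partition (ds = 'today') compact 'major'");
        driver.run("show compactions");
        driver.run("show transactions");
        driver.run("show locks");
      }
    }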