diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index e185f12..eccf8f6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -1116,7 +1116,7 @@ private int createIndex(Hive db, CreateIndexDesc crtIndex) throws HiveException MetaStoreUtils.getIndexTableName(SessionState.get().getCurrentDatabase(), crtIndex.getTableName(), crtIndex.getIndexName()); Table indexTable = db.getTable(indexTableName); - work.getOutputs().add(new WriteEntity(indexTable, WriteEntity.WriteType.DDL)); + work.getOutputs().add(new WriteEntity(indexTable, WriteEntity.WriteType.DDL_SHARED)); } return 0; } @@ -1235,7 +1235,8 @@ private int renamePartition(Hive db, RenamePartitionDesc renamePartitionDesc) th Partition newPart = db .getPartition(tbl, renamePartitionDesc.getNewPartSpec(), false); work.getInputs().add(new ReadEntity(oldPart)); - work.getOutputs().add(new WriteEntity(newPart, WriteEntity.WriteType.DDL)); + // We've already obtained a lock on the table, don't lock the partition too + work.getOutputs().add(new WriteEntity(newPart, WriteEntity.WriteType.DDL_NO_LOCK)); return 0; } @@ -1277,7 +1278,8 @@ private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionD } work.getInputs().add(new ReadEntity(tbl)); - work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL)); + // We've already locked the table as the input, don't relock it as the output. + work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); return 0; } @@ -1306,7 +1308,7 @@ private int touch(Hive db, AlterTableSimpleDesc touchDesc) throw new HiveException("Uable to update table"); } work.getInputs().add(new ReadEntity(tbl)); - work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_METADATA_ONLY)); + work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); } else { Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false); if (part == null) { @@ -1318,7 +1320,7 @@ private int touch(Hive db, AlterTableSimpleDesc touchDesc) throw new HiveException(e); } work.getInputs().add(new ReadEntity(part)); - work.getOutputs().add(new WriteEntity(part, WriteEntity.WriteType.DDL_METADATA_ONLY)); + work.getOutputs().add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK)); } return 0; } @@ -3817,19 +3819,20 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { // contains the new table. This is needed for rename - both the old and the // new table names are // passed + // Don't acquire locks for any of these, we have already asked for them in DDLSemanticAnalyzer. 
if(part != null) { work.getInputs().add(new ReadEntity(part)); - work.getOutputs().add(new WriteEntity(part, WriteEntity.WriteType.DDL)); + work.getOutputs().add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK)); } else if (allPartitions != null ){ for (Partition tmpPart: allPartitions) { work.getInputs().add(new ReadEntity(tmpPart)); - work.getOutputs().add(new WriteEntity(tmpPart, WriteEntity.WriteType.DDL)); + work.getOutputs().add(new WriteEntity(tmpPart, WriteEntity.WriteType.DDL_NO_LOCK)); } } else { work.getInputs().add(new ReadEntity(oldTbl)); - work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL)); + work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); } return 0; } @@ -3867,7 +3870,8 @@ private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws Hi dropTbl.getPartSpecs(), true, dropTbl.getIgnoreProtection(), true); for (Partition partition : droppedParts) { console.printInfo("Dropped the partition " + partition.getName()); - work.getOutputs().add(new WriteEntity(partition, WriteEntity.WriteType.DDL)); + // We have already locked the table, don't lock the partitions. + work.getOutputs().add(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK)); }; } @@ -3921,7 +3925,8 @@ private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveExc // drop the table db.dropTable(dropTbl.getTableName()); if (tbl != null) { - work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL)); + // We have already locked the table in DDLSemanticAnalyzer, don't do it again here + work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); } } @@ -4196,7 +4201,7 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { // create the table db.createTable(tbl, crtTbl.getIfNotExists()); - work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL)); + work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_EXCLUSIVE)); return 0; } @@ -4304,7 +4309,7 @@ private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws HiveExce // create the table db.createTable(tbl, crtTbl.getIfNotExists()); - work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL)); + work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_EXCLUSIVE)); return 0; } @@ -4340,7 +4345,7 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { } catch (InvalidOperationException e) { throw new HiveException(e); } - work.getOutputs().add(new WriteEntity(oldview, WriteEntity.WriteType.DDL)); + work.getOutputs().add(new WriteEntity(oldview, WriteEntity.WriteType.DDL_SHARED)); } else { // create new view Table tbl = db.newTable(crtView.getViewName()); @@ -4367,7 +4372,7 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { } db.createTable(tbl, crtView.getIfNotExists()); - work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL)); + work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_EXCLUSIVE)); } return 0; } diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java index 44a3924..e8f4b17 100644 --- ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java +++ ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.ql.metadata.DummyPartition; import org.apache.hadoop.hive.ql.metadata.Partition; import
org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import java.io.Serializable; @@ -35,10 +36,10 @@ private boolean isTempURI = false; public static enum WriteType { - DDL, // for use in DDL statements that will touch data, - // will result in an exclusive lock, - DDL_METADATA_ONLY, // for use in DDL statements that touch only - // metadata and don't need a lock + DDL_EXCLUSIVE, // for use in DDL statements that require an exclusive lock, + // such as dropping a table or partition + DDL_SHARED, // for use in DDL operations that only need a shared lock, such as creating a table + DDL_NO_LOCK, // for use in DDL statements that do not require a lock INSERT, INSERT_OVERWRITE, UPDATE, @@ -147,4 +148,43 @@ public boolean isTempURI() { return isTempURI; } + /** + * Determine the type of lock to request for a given alter table type. + * @param op Operation type from the alter table description + * @return the write type this should use. + */ + public static WriteType determineAlterTableWriteType(AlterTableDesc.AlterTableTypes op) { + switch (op) { + case RENAMECOLUMN: + case ADDCLUSTERSORTCOLUMN: + case ADDFILEFORMAT: + case ADDSERDE: + case DROPPROPS: + case REPLACECOLS: + case ARCHIVE: + case UNARCHIVE: + case ALTERPROTECTMODE: + case ALTERPARTITIONPROTECTMODE: + case ALTERLOCATION: + case DROPPARTITION: + case RENAMEPARTITION: + case ADDSKEWEDBY: + case ALTERSKEWEDLOCATION: + case ALTERBUCKETNUM: + case RENAME: return WriteType.DDL_EXCLUSIVE; + + case ADDPARTITION: + case ADDSERDEPROPS: + case ADDPROPS: + case ALTERPARTITION: + case ADDCOLS: return WriteType.DDL_SHARED; + + case COMPACT: + case TOUCH: return WriteType.DDL_NO_LOCK; + + default: + throw new RuntimeException("Unknown operation " + op.toString()); + } + } + } diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index 7750ef4..33c12e7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -135,13 +135,13 @@ public void acquireLocks(QueryPlan plan, Context ctx, String username) throws Lo Table t = null; LOG.debug("output is null " + (output == null)); switch (output.getWriteType()) { - case DDL: + case DDL_EXCLUSIVE: case INSERT_OVERWRITE: compBuilder.setExclusive(); break; case INSERT: - case DDL_METADATA_ONLY: + case DDL_SHARED: compBuilder.setShared(); break; @@ -150,6 +150,9 @@ public void acquireLocks(QueryPlan plan, Context ctx, String username) throws Lo compBuilder.setSemiShared(); break; + case DDL_NO_LOCK: + continue; // No lock required here + default: throw new RuntimeException("Unknown write type " + output.getWriteType().toString()); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 56cbcf8..bd428e8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -615,7 +615,7 @@ private void analyzeAlterDatabaseProperties(ASTNode ast) throws SemanticExceptio private void addAlterDbDesc(AlterDatabaseDesc alterDesc) throws SemanticException { Database database = getDatabase(alterDesc.getDatabaseName()); - outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_METADATA_ONLY)); + outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK)); rootTasks.add(TaskFactory.get(new 
DDLWork(getInputs(), getOutputs(), alterDesc), conf)); } @@ -770,12 +770,14 @@ private void analyzeDropDatabase(ASTNode ast) throws SemanticException { if (tableNames != null) { for (String tableName : tableNames) { Table table = getTable(dbName, tableName, true); - outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL)); + // We want no lock here, as the database lock will cover the tables, + // and putting a lock will actually cause us to deadlock on ourselves. + outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK)); } } } inputs.add(new ReadEntity(database)); - outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL)); + outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_EXCLUSIVE)); DropDatabaseDesc dropDatabaseDesc = new DropDatabaseDesc(dbName, ifExists, ifCascade); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropDatabaseDesc), conf)); @@ -801,7 +803,7 @@ private void analyzeDropTable(ASTNode ast, boolean expectView) Table tab = getTable(tableName, throwException); if (tab != null) { inputs.add(new ReadEntity(tab)); - outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL)); + outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE)); } DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView, ifExists); @@ -826,19 +828,19 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { Map partSpec = getPartSpec((ASTNode) root.getChild(1)); if (partSpec == null) { if (!table.isPartitioned()) { - outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL)); + outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_EXCLUSIVE)); } else { for (Partition partition : getPartitions(table, null, false)) { - outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL)); + outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE)); } } } else { if (isFullSpec(table, partSpec)) { Partition partition = getPartition(table, partSpec, true); - outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL)); + outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE)); } else { for (Partition partition : getPartitions(table, partSpec, false)) { - outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL)); + outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE)); } } } @@ -1375,19 +1377,22 @@ private void addInputsOutputsAlterTable(String tableName, Map pa private void addInputsOutputsAlterTable(String tableName, Map partSpec, AlterTableDesc desc) throws SemanticException { Table tab = getTable(tableName, true); + // Determine the lock type to acquire + WriteEntity.WriteType writeType = desc == null ? 
WriteEntity.WriteType.DDL_EXCLUSIVE : + WriteEntity.determineAlterTableWriteType(desc.getOp()); if (partSpec == null || partSpec.isEmpty()) { inputs.add(new ReadEntity(tab)); - outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL)); + outputs.add(new WriteEntity(tab, writeType)); } else { inputs.add(new ReadEntity(tab)); if (desc == null || desc.getOp() != AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) { Partition part = getPartition(tab, partSpec, true); - outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL)); + outputs.add(new WriteEntity(part, writeType)); } else { for (Partition part : getPartitions(tab, partSpec, true)) { - outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL)); + outputs.add(new WriteEntity(part, writeType)); } } } @@ -2650,7 +2655,7 @@ private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView) Table tab = getTable(tblName, true); boolean isView = tab.isView(); validateAlterTableType(tab, AlterTableTypes.ADDPARTITION, expectView); - outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL)); + outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_SHARED)); int numCh = ast.getChildCount(); int start = ifNotExists ? 2 : 1; @@ -2784,7 +2789,7 @@ private void analyzeAlterTableTouch(CommonTree ast) AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc( SessionState.get().getCurrentDatabase(), tblName, null, AlterTableDesc.AlterTableTypes.TOUCH); - outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_METADATA_ONLY)); + outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK)); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc), conf)); } else { @@ -3052,7 +3057,8 @@ private void addTablePartsOutputs(String tblName, List> part } } for (Partition p : parts) { - outputs.add(new WriteEntity(p, WriteEntity.WriteType.DDL)); + // Don't request any locks here, as the table has already been locked. 
+ outputs.add(new WriteEntity(p, WriteEntity.WriteType.DDL_NO_LOCK)); } } } @@ -3120,7 +3126,7 @@ private void analyzeAltertableSkewedby(ASTNode ast) throws SemanticException { Table tab = getTable(tableName, true); inputs.add(new ReadEntity(tab)); - outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL)); + outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE)); validateAlterTableType(tab, AlterTableTypes.ADDSKEWEDBY); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java index a57785e..e64ef76 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java @@ -171,7 +171,7 @@ private void addEntities(String functionName, boolean isTemporaryFunction) } } if (database != null) { - outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_METADATA_ONLY)); + outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK)); } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index 92ec334..d32ebae 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -226,7 +226,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { loadTable(fromURI, table); } // Set this to read because we can't overwrite any existing partitions - outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_METADATA_ONLY)); + outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK)); } catch (InvalidTableException e) { LOG.debug("table " + tblDesc.getTableName() + " does not exist"); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java index aa07548..a918615 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java @@ -171,6 +171,6 @@ private void analyzeDropMacro(ASTNode ast) throws SemanticException { private void addEntities() throws SemanticException { Database database = getDatabase(MetaStoreUtils.DEFAULT_DATABASE_NAME); // This restricts macro creation to privileged users. 
- outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_METADATA_ONLY)); + outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK)); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index e3291be..e4a5f7d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -8829,7 +8829,7 @@ private void setupStats(TableScanDesc tsDesc, QBParseInfo qbp, Table tab, String tsDesc.setStatsAggPrefix(tab.getDbName()+"."+k); // set up WritenEntity for replication - outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_METADATA_ONLY)); + outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK)); // add WriteEntity for each matching partition if (tab.isPartitioned()) { @@ -8840,7 +8840,7 @@ private void setupStats(TableScanDesc tsDesc, QBParseInfo qbp, Table tab, String if (partitions != null) { for (Partition partn : partitions) { // inputs.add(new ReadEntity(partn)); // is this needed at all? - outputs.add(new WriteEntity(partn, WriteEntity.WriteType.DDL_METADATA_ONLY)); + outputs.add(new WriteEntity(partn, WriteEntity.WriteType.DDL_NO_LOCK)); } } } @@ -9902,7 +9902,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) String[] qualified = Hive.getQualifiedNames(tableName); String dbName = qualified.length == 1 ? SessionState.get().getCurrentDatabase() : qualified[0]; Database database = getDatabase(dbName); - outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_METADATA_ONLY)); + outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_SHARED)); // Handle different types of CREATE TABLE command CreateTableDesc crtTblDesc = null; switch (command_type) { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java index d591163..0354604 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java @@ -252,9 +252,9 @@ private PrivilegeObjectDesc analyzePrivilegeObject(ASTNode ast, Table tbl = getTable(SessionState.get().getCurrentDatabase(), subject.getObject()); if (subject.getPartSpec() != null) { Partition part = getPartition(tbl, subject.getPartSpec()); - outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_METADATA_ONLY)); + outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK)); } else { - outputs.add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_METADATA_ONLY)); + outputs.add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); } } diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java index 01e5085..98c3cc3 100644 --- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java +++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java @@ -181,8 +181,8 @@ public void testDelete() throws Exception { } @Test - public void testDDL() throws Exception { - WriteEntity we = addTableOutput(WriteEntity.WriteType.DDL); + public void testDDLExclusive() throws Exception { + WriteEntity we = addTableOutput(WriteEntity.WriteType.DDL_EXCLUSIVE); QueryPlan qp = new MockQueryPlan(this); txnMgr.acquireLocks(qp, ctx, "fred"); List locks = 
ctx.getHiveLocks(); @@ -194,6 +194,30 @@ public void testDDL() throws Exception { Assert.assertEquals(0, locks.size()); } + @Test + public void testDDLShared() throws Exception { + WriteEntity we = addTableOutput(WriteEntity.WriteType.DDL_SHARED); + QueryPlan qp = new MockQueryPlan(this); + txnMgr.acquireLocks(qp, ctx, "fred"); + List locks = ctx.getHiveLocks(); + Assert.assertEquals(1, locks.size()); + Assert.assertEquals(1, + TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId)); + txnMgr.getLockManager().unlock(locks.get(0)); + locks = txnMgr.getLockManager().getLocks(false, false); + Assert.assertEquals(0, locks.size()); + } + + @Test + public void testDDLNoLock() throws Exception { + WriteEntity we = addTableOutput(WriteEntity.WriteType.DDL_NO_LOCK); + QueryPlan qp = new MockQueryPlan(this); + txnMgr.acquireLocks(qp, ctx, "fred"); + List locks = ctx.getHiveLocks(); + Assert.assertNull(locks); + } + + @Before public void setUp() throws Exception { TxnDbUtil.prepDb(); diff --git ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q new file mode 100644 index 0000000..23a5a45 --- /dev/null +++ ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q @@ -0,0 +1,67 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; + +create database D1; + +alter database D1 set dbproperties('test'='yesthisis'); + +drop database D1; + +create table T1(key string, val string) stored as textfile; + +create table T2 like T1; + +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; + +select * from T1; + +create table T3 as select * from T1; + +create table T4 (key char(10), val decimal(5,2), b int) + partitioned by (ds string) + clustered by (b) into 10 buckets + stored as orc; + +alter table T3 rename to newT3; + +alter table T2 set tblproperties ('test'='thisisatest'); + +alter table T2 set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'; +alter table T2 set serdeproperties ('test'='thisisatest'); + +alter table T2 clustered by (key) into 32 buckets; + +alter table T4 add partition (ds='today'); + +alter table T4 partition (ds='today') rename to partition(ds='yesterday'); + +alter table T4 drop partition (ds='yesterday'); + +alter table T4 add partition (ds='tomorrow'); + +create table T5 (a string, b int); +alter table T5 set fileformat orc; + +--create table T6 (a string, b int) partitioned by (ds string); +--alter table T6 add partition (ds='tomorrow'); +--alter table T6 partition (ds='tomorrow') set fileformat orc; + +create table T7 (a string, b int); +alter table T7 set location 'file:///tmp'; + +--create table T8 (a string, b int) partitioned by (ds string); +--alter table T8 add partition (ds='tomorrow'); +--alter table T8 partition (ds='tomorrow') set location 'file:///tmp'; + +alter table T2 touch; +alter table T4 touch partition (ds='tomorrow'); + +create view V1 as select key from T1; +alter view V1 set tblproperties ('test'='thisisatest'); +drop view V1; + + + +drop table T1; +drop table T2; +drop table newT3; diff --git ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out new file mode 100644 index 0000000..1dfb682 --- /dev/null +++ ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out @@ -0,0 +1,258 @@ +PREHOOK: query: create database D1 +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: create database D1 +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: alter database D1 set 
dbproperties('test'='yesthisis') +PREHOOK: type: ALTERDATABASE +PREHOOK: Output: database:d1 +POSTHOOK: query: alter database D1 set dbproperties('test'='yesthisis') +POSTHOOK: type: ALTERDATABASE +POSTHOOK: Output: database:d1 +PREHOOK: query: drop database D1 +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:d1 +PREHOOK: Output: database:d1 +POSTHOOK: query: drop database D1 +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:d1 +POSTHOOK: Output: database:d1 +PREHOOK: query: create table T1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: create table T1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T1 +PREHOOK: query: create table T2 like T1 +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: create table T2 like T1 +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t1 +PREHOOK: query: select * from T1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from T1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 11 +2 12 +3 13 +7 17 +8 18 +8 28 +PREHOOK: query: create table T3 as select * from T1 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@t1 +POSTHOOK: query: create table T3 as select * from T1 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@t1 +POSTHOOK: Output: default@T3 +PREHOOK: query: create table T4 (key char(10), val decimal(5,2), b int) + partitioned by (ds string) + clustered by (b) into 10 buckets + stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: create table T4 (key char(10), val decimal(5,2), b int) + partitioned by (ds string) + clustered by (b) into 10 buckets + stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T4 +PREHOOK: query: alter table T3 rename to newT3 +PREHOOK: type: ALTERTABLE_RENAME +PREHOOK: Input: default@t3 +PREHOOK: Output: default@t3 +POSTHOOK: query: alter table T3 rename to newT3 +POSTHOOK: type: ALTERTABLE_RENAME +POSTHOOK: Input: default@t3 +POSTHOOK: Output: default@newT3 +POSTHOOK: Output: default@t3 +PREHOOK: query: alter table T2 set tblproperties ('test'='thisisatest') +PREHOOK: type: ALTERTABLE_PROPERTIES +PREHOOK: Input: default@t2 +PREHOOK: Output: default@t2 +POSTHOOK: query: alter table T2 set tblproperties ('test'='thisisatest') +POSTHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: Input: default@t2 +POSTHOOK: Output: default@t2 +PREHOOK: query: alter table T2 set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' +PREHOOK: type: ALTERTABLE_SERIALIZER +PREHOOK: Input: default@t2 +PREHOOK: Output: default@t2 +POSTHOOK: query: alter table T2 set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: default@t2 +POSTHOOK: Output: default@t2 +PREHOOK: query: alter table T2 set serdeproperties ('test'='thisisatest') +PREHOOK: type: ALTERTABLE_SERDEPROPERTIES +PREHOOK: Input: default@t2 
+PREHOOK: Output: default@t2 +POSTHOOK: query: alter table T2 set serdeproperties ('test'='thisisatest') +POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES +POSTHOOK: Input: default@t2 +POSTHOOK: Output: default@t2 +PREHOOK: query: alter table T2 clustered by (key) into 32 buckets +PREHOOK: type: ALTERTABLE_CLUSTER_SORT +PREHOOK: Input: default@t2 +PREHOOK: Output: default@t2 +POSTHOOK: query: alter table T2 clustered by (key) into 32 buckets +POSTHOOK: type: ALTERTABLE_CLUSTER_SORT +POSTHOOK: Input: default@t2 +POSTHOOK: Output: default@t2 +PREHOOK: query: alter table T4 add partition (ds='today') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@t4 +POSTHOOK: query: alter table T4 add partition (ds='today') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@t4 +POSTHOOK: Output: default@t4@ds=today +PREHOOK: query: alter table T4 partition (ds='today') rename to partition(ds='yesterday') +PREHOOK: type: ALTERTABLE_RENAMEPART +PREHOOK: Input: default@t4 +PREHOOK: Output: default@t4@ds=today +POSTHOOK: query: alter table T4 partition (ds='today') rename to partition(ds='yesterday') +POSTHOOK: type: ALTERTABLE_RENAMEPART +POSTHOOK: Input: default@t4 +POSTHOOK: Input: default@t4@ds=today +POSTHOOK: Output: default@t4@ds=today +POSTHOOK: Output: default@t4@ds=yesterday +PREHOOK: query: alter table T4 drop partition (ds='yesterday') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@t4 +PREHOOK: Output: default@t4@ds=yesterday +POSTHOOK: query: alter table T4 drop partition (ds='yesterday') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@t4 +POSTHOOK: Output: default@t4@ds=yesterday +PREHOOK: query: alter table T4 add partition (ds='tomorrow') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@t4 +POSTHOOK: query: alter table T4 add partition (ds='tomorrow') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@t4 +POSTHOOK: Output: default@t4@ds=tomorrow +PREHOOK: query: create table T5 (a string, b int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: create table T5 (a string, b int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T5 +PREHOOK: query: alter table T5 set fileformat orc +PREHOOK: type: ALTERTABLE_FILEFORMAT +PREHOOK: Input: default@t5 +PREHOOK: Output: default@t5 +POSTHOOK: query: alter table T5 set fileformat orc +POSTHOOK: type: ALTERTABLE_FILEFORMAT +POSTHOOK: Input: default@t5 +POSTHOOK: Output: default@t5 +PREHOOK: query: --create table T6 (a string, b int) partitioned by (ds string); +--alter table T6 add partition (ds='tomorrow'); +--alter table T6 partition (ds='tomorrow') set fileformat orc; + +create table T7 (a string, b int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: --create table T6 (a string, b int) partitioned by (ds string); +--alter table T6 add partition (ds='tomorrow'); +--alter table T6 partition (ds='tomorrow') set fileformat orc; + +create table T7 (a string, b int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T7 +#### A masked pattern was here #### +PREHOOK: type: ALTERTABLE_LOCATION +PREHOOK: Input: default@t7 +PREHOOK: Output: default@t7 +#### A masked pattern was here #### +POSTHOOK: type: ALTERTABLE_LOCATION +POSTHOOK: Input: default@t7 +POSTHOOK: Output: default@t7 +#### A masked pattern was here #### +PREHOOK: query: --create table T8 (a string, b int) partitioned by (ds string); +--alter table T8 add partition 
(ds='tomorrow'); +#### A masked pattern was here #### + +alter table T2 touch +PREHOOK: type: ALTERTABLE_TOUCH +PREHOOK: Input: default@t2 +PREHOOK: Output: default@t2 +POSTHOOK: query: --create table T8 (a string, b int) partitioned by (ds string); +--alter table T8 add partition (ds='tomorrow'); +#### A masked pattern was here #### + +alter table T2 touch +POSTHOOK: type: ALTERTABLE_TOUCH +POSTHOOK: Input: default@t2 +POSTHOOK: Output: default@t2 +PREHOOK: query: alter table T4 touch partition (ds='tomorrow') +PREHOOK: type: ALTERTABLE_TOUCH +PREHOOK: Input: default@t4 +PREHOOK: Output: default@t4@ds=tomorrow +POSTHOOK: query: alter table T4 touch partition (ds='tomorrow') +POSTHOOK: type: ALTERTABLE_TOUCH +POSTHOOK: Input: default@t4 +POSTHOOK: Input: default@t4@ds=tomorrow +POSTHOOK: Output: default@t4@ds=tomorrow +PREHOOK: query: create view V1 as select key from T1 +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@t1 +POSTHOOK: query: create view V1 as select key from T1 +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@t1 +POSTHOOK: Output: default@V1 +PREHOOK: query: alter view V1 set tblproperties ('test'='thisisatest') +PREHOOK: type: ALTERVIEW_PROPERTIES +PREHOOK: Input: default@v1 +PREHOOK: Output: default@v1 +POSTHOOK: query: alter view V1 set tblproperties ('test'='thisisatest') +POSTHOOK: type: ALTERVIEW_PROPERTIES +POSTHOOK: Input: default@v1 +POSTHOOK: Output: default@v1 +PREHOOK: query: drop view V1 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v1 +PREHOOK: Output: default@v1 +POSTHOOK: query: drop view V1 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v1 +POSTHOOK: Output: default@v1 +PREHOOK: query: drop table T1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@t1 +PREHOOK: Output: default@t1 +POSTHOOK: query: drop table T1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@t1 +POSTHOOK: Output: default@t1 +PREHOOK: query: drop table T2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@t2 +PREHOOK: Output: default@t2 +POSTHOOK: query: drop table T2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@t2 +POSTHOOK: Output: default@t2 +PREHOOK: query: drop table newT3 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@newt3 +PREHOOK: Output: default@newt3 +POSTHOOK: query: drop table newT3 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@newt3 +POSTHOOK: Output: default@newt3
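
For quick reference, here is a minimal stand-alone sketch (not part of the patch) of the lock selection this change introduces: the single DDL write type is split into three tags, and the transaction manager builds its lock request from the tag, adding no lock component at all for DDL_NO_LOCK because a covering lock (typically on the table or database) has already been requested. The class and enum names below are illustrative stand-ins, not the real Hive WriteEntity/DbTxnManager classes.

// Illustrative sketch only -- local stand-in enums mirroring the WriteType -> lock mapping in this patch:
// DDL_EXCLUSIVE takes an exclusive lock (e.g. DROP TABLE, TRUNCATE), DDL_SHARED a shared lock
// (e.g. ADD PARTITION), and DDL_NO_LOCK skips locking for outputs already covered by another lock.
public class DdlLockMappingSketch {
  enum WriteType { DDL_EXCLUSIVE, DDL_SHARED, DDL_NO_LOCK, INSERT, INSERT_OVERWRITE, UPDATE, DELETE }
  enum LockMode { EXCLUSIVE, SHARED, SEMI_SHARED, NONE }

  static LockMode lockFor(WriteType type) {
    switch (type) {
      case DDL_EXCLUSIVE:
      case INSERT_OVERWRITE:
        return LockMode.EXCLUSIVE;
      case DDL_SHARED:
      case INSERT:
        return LockMode.SHARED;
      case UPDATE:
      case DELETE:
        return LockMode.SEMI_SHARED;
      case DDL_NO_LOCK:
        return LockMode.NONE; // no lock component is added for this output
      default:
        throw new IllegalArgumentException("Unknown write type " + type);
    }
  }

  public static void main(String[] args) {
    for (WriteType t : WriteType.values()) {
      System.out.println(t + " -> " + lockFor(t));
    }
  }
}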