diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
index b828f4c..40dd992 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
@@ -782,6 +782,51 @@ public void testGetSplitsLocks() throws Exception {
     assertEquals(1, rows.size());
   }
 
+  @Test
+  public void testGetSplitsLocksWithMaterializedView() throws Exception {
+    // This needs to be tested with LLAP settings, which require some additional configuration.
+    HiveConf modConf = new HiveConf(hiveConf);
+    setupTez(modConf);
+    modConf.setVar(ConfVars.HIVE_EXECUTION_ENGINE, "tez");
+    modConf.setVar(ConfVars.HIVEFETCHTASKCONVERSION, "more");
+    modConf.setVar(HiveConf.ConfVars.LLAP_DAEMON_SERVICE_HOSTS, "localhost");
+
+    // SessionState/Driver needs to be restarted with the Tez conf settings.
+    restartSessionAndDriver(modConf);
+    TxnStore txnHandler = TxnUtils.getTxnStore(modConf);
+    String mvName = "mv_acidTbl";
+    try {
+      runStatementOnDriver("create materialized view " + mvName + " as select a from " + Table.ACIDTBL + " where a > 5");
+
+      // Request LLAP splits for a table.
+      String queryParam = "select a from " + Table.ACIDTBL + " where a > 5";
+      runStatementOnDriver("select get_splits(\"" + queryParam + "\", 1)");
+
+      // The get_splits call should have resulted in locks on ACIDTBL and on the materialized view mv_acidTbl.
+      ShowLocksResponse slr = txnHandler.showLocks(new ShowLocksRequest());
+      TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED,
+          "default", Table.ACIDTBL.name, null, slr.getLocks());
+      TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED,
+          "default", mvName, null, slr.getLocks());
+      assertEquals(2, slr.getLocksSize());
+    } finally {
+      // Close the session, which should free up the TxnHandler/locks held by the session.
+      // Done in the finally block to make sure we free up the locks; otherwise
+      // the cleanup in tearDown() will get stuck waiting on the lock held here on ACIDTBL.
+      restartSessionAndDriver(hiveConf);
+      runStatementOnDriver("drop materialized view if exists " + mvName);
+    }
+
+    // Locks should be freed up now.
+    ShowLocksResponse slr = txnHandler.showLocks(new ShowLocksRequest());
+    assertEquals(0, slr.getLocksSize());
+
+    List<String> rows = runStatementOnDriver("show transactions");
+    // Transactions should all be committed by now;
+    // "show transactions" returns just the header row.
+    assertEquals(1, rows.size());
+  }
+
   private void restartSessionAndDriver(HiveConf conf) throws Exception {
     SessionState ss = SessionState.get();
     if (ss != null) {
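For reference, the post-shutdown check above could be factored into a small helper; this is a sketch only, not part of the patch. assertNoLockOn is a hypothetical name, while TxnStore#showLocks and ShowLocksResponseElement#getTablename are the same metastore APIs the test already uses:

    // Hypothetical helper: fail if any lock known to the transaction store still
    // references the given table (e.g. ACIDTBL or the materialized view).
    private static void assertNoLockOn(TxnStore txnHandler, String tableName) throws Exception {
      ShowLocksResponse slr = txnHandler.showLocks(new ShowLocksRequest());
      boolean locked = slr.getLocks().stream()
          .anyMatch(lock -> tableName.equalsIgnoreCase(lock.getTablename()));
      assertFalse("Unexpected lock on " + tableName, locked);
    }
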
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 4e3d74c..374e973 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -242,7 +242,7 @@ long openTxn(Context ctx, String user, long delay) throws LockException {
       tableWriteIds.clear();
       isExplicitTransaction = false;
       startTransactionCount = 0;
-      LOG.debug("Opened " + JavaUtils.txnIdToString(txnId));
+      LOG.info("Opened " + JavaUtils.txnIdToString(txnId));
       ctx.setHeartbeater(startHeartbeat(delay));
       return txnId;
     } catch (TException e) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 3c32de9..07d0f11 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -120,6 +120,7 @@ import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.AcidUtils.TableSnapshot;
 import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 import org.apache.hadoop.hive.ql.lockmgr.LockException;
 import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
@@ -1442,8 +1443,8 @@ public Table apply(org.apache.hadoop.hive.metastore.api.Table table) {
    * @return the list of materialized views available for rewriting
    * @throws HiveException
    */
-  public List<RelOptMaterialization> getAllValidMaterializedViews(List<String> tablesUsed, boolean forceMVContentsUpToDate)
-      throws HiveException {
+  public List<RelOptMaterialization> getAllValidMaterializedViews(List<String> tablesUsed, boolean forceMVContentsUpToDate,
+      HiveTxnManager txnMgr) throws HiveException {
     // Final result
     List<RelOptMaterialization> result = new ArrayList<>();
     try {
@@ -1454,7 +1455,8 @@ public Table apply(org.apache.hadoop.hive.metastore.api.Table table) {
         // Bail out: empty list
         continue;
       }
-      result.addAll(getValidMaterializedViews(dbName, materializedViewNames, tablesUsed, forceMVContentsUpToDate));
+      result.addAll(getValidMaterializedViews(dbName, materializedViewNames,
+          tablesUsed, forceMVContentsUpToDate, txnMgr));
     }
     return result;
   } catch (Exception e) {
@@ -1463,15 +1465,15 @@ public Table apply(org.apache.hadoop.hive.metastore.api.Table table) {
   }
 
   public List<RelOptMaterialization> getValidMaterializedView(String dbName, String materializedViewName,
-      List<String> tablesUsed, boolean forceMVContentsUpToDate) throws HiveException {
-    return getValidMaterializedViews(dbName, ImmutableList.of(materializedViewName), tablesUsed, forceMVContentsUpToDate);
+      List<String> tablesUsed, boolean forceMVContentsUpToDate, HiveTxnManager txnMgr) throws HiveException {
+    return getValidMaterializedViews(dbName, ImmutableList.of(materializedViewName),
+        tablesUsed, forceMVContentsUpToDate, txnMgr);
   }
 
   private List<RelOptMaterialization> getValidMaterializedViews(String dbName, List<String> materializedViewNames,
-      List<String> tablesUsed, boolean forceMVContentsUpToDate) throws HiveException {
+      List<String> tablesUsed, boolean forceMVContentsUpToDate, HiveTxnManager txnMgr) throws HiveException {
     final String validTxnsList = conf.get(ValidTxnList.VALID_TXNS_KEY);
-    final ValidTxnWriteIdList currentTxnWriteIds =
-        SessionState.get().getTxnMgr().getValidWriteIds(tablesUsed, validTxnsList);
+    final ValidTxnWriteIdList currentTxnWriteIds = txnMgr.getValidWriteIds(tablesUsed, validTxnsList);
     final boolean tryIncrementalRewriting =
         HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_INCREMENTAL);
     final boolean tryIncrementalRebuild =
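The Hive.java change is the core of the fix: the valid write-ID snapshot used to validate materialized views now comes from the transaction manager the caller passes in, rather than from the thread-local SessionState, so it is tied to the transaction that is actually compiling the query. A minimal sketch of the resulting calling convention (signatures as above; the wrapper method itself is illustrative only):

    // Illustrative wrapper: a caller threads its own transaction manager through,
    // so getValidWriteIds() is answered for the transaction this caller opened.
    static List<RelOptMaterialization> lookupRewritingMVs(Hive db, List<String> tablesUsed,
        HiveTxnManager txnMgr) throws HiveException {
      // forceMVContentsUpToDate = false: plain rewriting, where staleness is governed
      // by HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW rather than forced refresh.
      return db.getAllValidMaterializedViews(tablesUsed, false, txnMgr);
    }
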
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 5cd6a5d..82c3ca9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -2151,13 +2151,13 @@ private RelNode applyMaterializedViewRewriting(RelOptPlanner planner, RelNode ba
         // We only retrieve the materialization corresponding to the rebuild. In turn,
         // we pass 'true' for the forceMVContentsUpToDate parameter, as we cannot allow the
         // materialization contents to be stale for a rebuild if we want to use it.
-        materializations = Hive.get().getValidMaterializedView(mvRebuildDbName, mvRebuildName,
-            getTablesUsed(basePlan), true);
+        materializations = db.getValidMaterializedView(mvRebuildDbName, mvRebuildName,
+            getTablesUsed(basePlan), true, getTxnMgr());
       } else {
         // This is not a rebuild, we retrieve all the materializations. In turn, we do not need
         // to force the materialization contents to be up-to-date, as this is not a rebuild, and
         // we apply the user parameters (HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW) instead.
-        materializations = Hive.get().getAllValidMaterializedViews(getTablesUsed(basePlan), false);
+        materializations = db.getAllValidMaterializedViews(getTablesUsed(basePlan), false, getTxnMgr());
       }
       // We need to use the current cluster for the scan operator on views,
       // otherwise the planner will throw an Exception (different planners)
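Both CalcitePlanner call sites now take their dependencies from the planner itself: db for the Hive client and getTxnMgr() for the transaction manager, instead of the thread-local Hive.get() and, inside Hive.java, SessionState.get(). This matters for paths such as get_splits, where compilation can run with a transaction manager other than the one in the calling thread's SessionState; with the old code the MV read could be registered against the wrong transaction and its lock never released, which is the situation the new test exercises. A sketch of the contrast (the "before" line is a hedged reconstruction of the removed call):

    // Before: dependencies resolved from thread-local state.
    //   materializations = Hive.get().getAllValidMaterializedViews(getTablesUsed(basePlan), false);
    // After: the planner's own Hive client and txn manager, with the staleness
    // flag spelled out (it is true only on the materialized-view rebuild path).
    materializations = db.getAllValidMaterializedViews(
        getTablesUsed(basePlan), /* forceMVContentsUpToDate */ false, getTxnMgr());
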
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index d27224b..6494506 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -1334,8 +1334,6 @@ public boolean dropTable(String catName, String dbName, String tableName)
   private boolean dropCreationMetadata(String catName, String dbName, String tableName) throws MetaException,
       NoSuchObjectException, InvalidObjectException, InvalidInputException {
     boolean success = false;
-    dbName = normalizeIdentifier(dbName);
-    tableName = normalizeIdentifier(tableName);
     try {
       openTransaction();
       MCreationMetadata mcm = getCreationMetadata(catName, dbName, tableName);
@@ -1818,6 +1816,9 @@ private MCreationMetadata getCreationMetadata(String catName, String dbName, Str
     boolean commited = false;
     MCreationMetadata mcm = null;
     Query query = null;
+    catName = normalizeIdentifier(catName);
+    dbName = normalizeIdentifier(dbName);
+    tblName = normalizeIdentifier(tblName);
     try {
       openTransaction();
       query = pm.newQuery(
@@ -2214,7 +2215,8 @@ private MCreationMetadata convertToMCreationMetadata(
       String[] names = fullyQualifiedName.split("\\.");
       tablesUsed.add(getMTable(m.getCatName(), names[0], names[1], false).mtbl);
     }
-    return new MCreationMetadata(m.getCatName(), m.getDbName(), m.getTblName(),
+    return new MCreationMetadata(normalizeIdentifier(m.getCatName()),
+        normalizeIdentifier(m.getDbName()), normalizeIdentifier(m.getTblName()),
         tablesUsed, m.getValidTxnList(),
         System.currentTimeMillis());
   }
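With normalization moved out of the single dropCreationMetadata caller and into getCreationMetadata and convertToMCreationMetadata, the creation metadata of a materialized view is stored and looked up in one canonical form, whatever case the caller used (the test's mv_acidTbl, for instance). A sketch of the invariant, assuming normalizeIdentifier trims and lower-cases as the metastore's string utilities do:

    // Assumed behavior of the metastore's identifier normalization.
    static String normalizeIdentifier(String identifier) {
      return identifier == null ? null : identifier.trim().toLowerCase();
    }

    // Write path (convertToMCreationMetadata) and read path (getCreationMetadata)
    // now both normalize, so a lookup always matches the stored key:
    // normalizeIdentifier("MV_AcidTbl").equals(normalizeIdentifier("mv_acidtbl")) == true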