diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index 7f1c2f8bdc..2fa8a51fb6 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@ -136,6 +136,7 @@ public static synchronized void prepDb(Configuration conf) throws Exception {
         " HL_BLOCKEDBY_INT_ID bigint," +
         " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
     stmt.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)");
+    stmt.execute("CREATE INDEX HL_DTP_INDEX ON HIVE_LOCKS (HL_DB, HL_TABLE, HL_PARTITION)");
     stmt.execute("CREATE TABLE NEXT_LOCK_ID (" +
         " NL_NEXT bigint NOT NULL)");
     stmt.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)");
@@ -397,16 +398,8 @@ public static void cleanDb(Configuration conf) throws Exception {
       stmt = conn.createStatement();

       // We want to try these, whether they succeed or fail.
-      try {
-        stmt.execute("DROP INDEX HL_TXNID_INDEX");
-      } catch (SQLException e) {
-        if(!("42X65".equals(e.getSQLState()) && 30000 == e.getErrorCode())) {
-          //42X65/3000 means index doesn't exist
-          LOG.error("Unable to drop index HL_TXNID_INDEX " + e.getMessage() +
-              "State=" + e.getSQLState() + " code=" + e.getErrorCode() + " retryCount=" + retryCount);
-          success = false;
-        }
-      }
+      success &= dropIndex(stmt, "HL_TXNID_INDEX", retryCount);
+      success &= dropIndex(stmt, "HL_DTP_INDEX", retryCount);

       success &= dropTable(stmt, "TXN_COMPONENTS", retryCount);
       success &= dropTable(stmt, "COMPLETED_TXN_COMPONENTS", retryCount);
@@ -440,6 +433,20 @@ public static void cleanDb(Configuration conf) throws Exception {
     throw new RuntimeException("Failed to clean up txn tables");
   }

+  private static boolean dropIndex(Statement stmt, String index, int retryCount) {
+    try {
+      stmt.execute("DROP INDEX " + index);
+    } catch (SQLException e) {
+      if (!("42X65".equals(e.getSQLState()) && 30000 == e.getErrorCode())) {
+        //42X65/30000 means index doesn't exist
+        LOG.error("Unable to drop index {} {} State={} code={} retryCount={}",
+            index, e.getMessage(), e.getSQLState(), e.getErrorCode(), retryCount);
+        return false;
+      }
+    }
+    return true;
+  }
+
   private static boolean dropTable(Statement stmt, String name, int retryCount) throws SQLException {
     for (int i = 0; i < 3; i++) {
       try {
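
For reference on the new dropIndex() helper: Derby reports DROP INDEX on a missing index as SQLState 42X65 with error code 30000 (statement severity), which the helper treats as success so that cleanDb() stays idempotent across retries. A minimal standalone sketch of the same guard, assuming only the Derby driver on the classpath (table and index names are illustrative, not from the patch):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class DropIndexDemo {
      // Derby signals a missing index as SQLState 42X65 / error code 30000;
      // anything else is a real failure.
      static boolean dropIndexIfExists(Statement stmt, String index) {
        try {
          stmt.execute("DROP INDEX " + index);
        } catch (SQLException e) {
          if (!("42X65".equals(e.getSQLState()) && 30000 == e.getErrorCode())) {
            System.err.println("Unable to drop index " + index + ": " + e.getMessage());
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:demo;create=true");
             Statement stmt = conn.createStatement()) {
          stmt.execute("CREATE TABLE T (A INT)");
          stmt.execute("CREATE INDEX T_A_IDX ON T (A)");
          System.out.println(dropIndexIfExists(stmt, "T_A_IDX")); // true: dropped
          System.out.println(dropIndexIfExists(stmt, "T_A_IDX")); // true: 42X65, treated as success
        }
      }
    }
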
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index f53aebe4ad..44d98f28e0 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -41,8 +41,6 @@
 import java.util.Objects;
 import java.util.Properties;
 import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
@@ -324,7 +322,6 @@ public void setConf(Configuration conf) {
     numOpenTxns = Metrics.getOrCreateGauge(MetricsConstants.NUM_OPEN_TXNS);

     timeout = MetastoreConf.getTimeVar(conf, ConfVars.TXN_TIMEOUT, TimeUnit.MILLISECONDS);
-    buildJumpTable();
     retryInterval = MetastoreConf.getTimeVar(conf,
         ConfVars.HMS_HANDLER_INTERVAL, TimeUnit.MILLISECONDS);
     retryLimit = MetastoreConf.getIntVar(conf, ConfVars.HMS_HANDLER_ATTEMPTS);
@@ -2608,7 +2605,7 @@ private static String normalizeCase(String s) {
   }

   private LockResponse checkLockWithRetry(Connection dbConn, long extLockId, long txnId)
-      throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, MetaException {
+      throws NoSuchLockException, TxnAbortedException, MetaException {
     try {
       try {
         lockInternal();
@@ -2616,7 +2613,7 @@ private LockResponse checkLockWithRetry(Connection dbConn, long extLockId, long
           //should only get here if retrying this op
           dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
         }
-        return checkLock(dbConn, extLockId);
+        return checkLock(dbConn, extLockId, txnId);
       } catch (SQLException e) {
         LOG.debug("Going to rollback");
         rollbackDBConn(dbConn);
@@ -2676,7 +2673,7 @@ public LockResponse checkLock(CheckLockRequest rqst)
         //todo: strictly speaking there is a bug here. heartbeat*() commits but both heartbeat and
         //checkLock() are in the same retry block, so if checkLock() throws, heartbeat is also retried
         //extra heartbeat is logically harmless, but ...
-        return checkLock(dbConn, extLockId);
+        return checkLock(dbConn, extLockId, info.txnId);
       } catch (SQLException e) {
         LOG.debug("Going to rollback");
         rollbackDBConn(dbConn);
extLockId=" + JavaUtils.lockIdToString(extLockId)); - Savepoint save = dbConn.setSavepoint(); - StringBuilder query = new StringBuilder("SELECT \"HL_LOCK_EXT_ID\", " + - "\"HL_LOCK_INT_ID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", \"HL_LOCK_STATE\", " + - "\"HL_LOCK_TYPE\", \"HL_TXNID\" FROM \"HIVE_LOCKS\" WHERE \"HL_DB\" IN ("); - - Set strings = new HashSet<>(locksBeingChecked.size()); - //This the set of entities that the statement represented by extLockId wants to update List writeSet = new ArrayList<>(); for (LockInfo info : locksBeingChecked) { - strings.add(info.db); if(!isPartOfDynamicPartitionInsert && info.type == LockType.SHARED_WRITE) { writeSet.add(info); } @@ -4406,155 +4389,66 @@ private LockResponse checkLock(Connection dbConn, long extLockId) close(rs, stmt, null); } - boolean first = true; - for (String s : strings) { - if (first) first = false; - else query.append(", "); - query.append('\''); - query.append(s); - query.append('\''); - } - query.append(")"); - - // If any of the table requests are null, then I need to pull all the - // table locks for this db. - boolean sawNull = false; - strings.clear(); - for (LockInfo info : locksBeingChecked) { - if (info.table == null) { - sawNull = true; - break; - } else { - strings.add(info.table); - } - } - if (!sawNull) { - query.append(" AND (\"HL_TABLE\" IS NULL OR \"HL_TABLE\" IN("); - first = true; - for (String s : strings) { - if (first) first = false; - else query.append(", "); - query.append('\''); - query.append(s); - query.append('\''); - } - query.append("))"); - - // If any of the partition requests are null, then I need to pull all - // partition locks for this table. - sawNull = false; - strings.clear(); - for (LockInfo info : locksBeingChecked) { - if (info.partition == null) { - sawNull = true; - break; - } else { - strings.add(info.partition); - } - } - if (!sawNull) { - query.append(" AND (\"HL_PARTITION\" IS NULL OR \"HL_PARTITION\" IN("); - first = true; - for (String s : strings) { - if (first) first = false; - else query.append(", "); - query.append('\''); - query.append(s); - query.append('\''); - } - query.append("))"); - } - } - query.append(" AND \"HL_LOCK_EXT_ID\" < ").append(extLockId); + String queryStr = " * FROM (" + + " SELECT \"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", \"HL_LOCK_STATE\", \"HL_LOCK_TYPE\" FROM \"HIVE_LOCKS\"" + + " WHERE \"HL_LOCK_EXT_ID\" < " + extLockId + ") \"LS\"" + + " INNER JOIN (" + + " SELECT \"HL_LOCK_INT_ID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", \"HL_LOCK_TYPE\" FROM \"HIVE_LOCKS\" WHERE \"HL_LOCK_EXT_ID\" = " + extLockId + ") \"LBC\"" + + " ON \"LS\".\"HL_DB\" = \"LBC\".\"HL_DB\"" + + " AND (\"LS\".\"HL_TABLE\" IS NULL OR \"LBC\".\"HL_TABLE\" IS NULL OR \"LS\".\"HL_TABLE\" = \"LBC\".\"HL_TABLE\"" + + " AND (\"LS\".\"HL_PARTITION\" IS NULL OR \"LBC\".\"HL_PARTITION\" IS NULL OR \"LS\".\"HL_PARTITION\" = \"LBC\".\"HL_PARTITION\"))" + + " WHERE "; + + String[] whereStr = { + // exclusive + " \"LBC\".\"HL_LOCK_TYPE\"='e'" + + " AND NOT (\"LBC\".\"HL_TABLE\" IS NOT NULL AND \"LS\".\"HL_TABLE\" IS NULL AND \"LS\".\"HL_LOCK_TYPE\"='r')", + // shared-write + " \"LBC\".\"HL_LOCK_TYPE\"='w' AND \"LS\".\"HL_LOCK_TYPE\" IN ('w','e')", + // shared-read + " \"LBC\".\"HL_LOCK_TYPE\"='r' AND \"LS\".\"HL_LOCK_TYPE\"='e'" + + " AND NOT (\"LBC\".\"HL_TABLE\" IS NULL AND \"LS\".\"HL_TABLE\" IS NOT NULL)", + }; - LOG.debug("Going to execute query <" + query.toString() + ">"); stmt = dbConn.createStatement(); - rs = 
@@ -4406,155 +4389,66 @@ private LockResponse checkLock(Connection dbConn, long extLockId)
         close(rs, stmt, null);
       }

-      boolean first = true;
-      for (String s : strings) {
-        if (first) first = false;
-        else query.append(", ");
-        query.append('\'');
-        query.append(s);
-        query.append('\'');
-      }
-      query.append(")");
-
-      // If any of the table requests are null, then I need to pull all the
-      // table locks for this db.
-      boolean sawNull = false;
-      strings.clear();
-      for (LockInfo info : locksBeingChecked) {
-        if (info.table == null) {
-          sawNull = true;
-          break;
-        } else {
-          strings.add(info.table);
-        }
-      }
-      if (!sawNull) {
-        query.append(" AND (\"HL_TABLE\" IS NULL OR \"HL_TABLE\" IN(");
-        first = true;
-        for (String s : strings) {
-          if (first) first = false;
-          else query.append(", ");
-          query.append('\'');
-          query.append(s);
-          query.append('\'');
-        }
-        query.append("))");
-
-        // If any of the partition requests are null, then I need to pull all
-        // partition locks for this table.
-        sawNull = false;
-        strings.clear();
-        for (LockInfo info : locksBeingChecked) {
-          if (info.partition == null) {
-            sawNull = true;
-            break;
-          } else {
-            strings.add(info.partition);
-          }
-        }
-        if (!sawNull) {
-          query.append(" AND (\"HL_PARTITION\" IS NULL OR \"HL_PARTITION\" IN(");
-          first = true;
-          for (String s : strings) {
-            if (first) first = false;
-            else query.append(", ");
-            query.append('\'');
-            query.append(s);
-            query.append('\'');
-          }
-          query.append("))");
-        }
-      }
-      query.append(" AND \"HL_LOCK_EXT_ID\" < ").append(extLockId);
+      String queryStr = " * FROM (" +
+          " SELECT \"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", \"HL_LOCK_STATE\", \"HL_LOCK_TYPE\" FROM \"HIVE_LOCKS\"" +
+          " WHERE \"HL_LOCK_EXT_ID\" < " + extLockId + ") \"LS\"" +
+          " INNER JOIN (" +
+          " SELECT \"HL_LOCK_INT_ID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", \"HL_LOCK_TYPE\" FROM \"HIVE_LOCKS\" WHERE \"HL_LOCK_EXT_ID\" = " + extLockId + ") \"LBC\"" +
+          " ON \"LS\".\"HL_DB\" = \"LBC\".\"HL_DB\"" +
+          " AND (\"LS\".\"HL_TABLE\" IS NULL OR \"LBC\".\"HL_TABLE\" IS NULL OR \"LS\".\"HL_TABLE\" = \"LBC\".\"HL_TABLE\"" +
+          " AND (\"LS\".\"HL_PARTITION\" IS NULL OR \"LBC\".\"HL_PARTITION\" IS NULL OR \"LS\".\"HL_PARTITION\" = \"LBC\".\"HL_PARTITION\"))" +
+          " WHERE ";
+
+      String[] whereStr = {
+          // exclusive
+          " \"LBC\".\"HL_LOCK_TYPE\"='e'" +
+          " AND NOT (\"LBC\".\"HL_TABLE\" IS NOT NULL AND \"LS\".\"HL_TABLE\" IS NULL AND \"LS\".\"HL_LOCK_TYPE\"='r')",
+          // shared-write
+          " \"LBC\".\"HL_LOCK_TYPE\"='w' AND \"LS\".\"HL_LOCK_TYPE\" IN ('w','e')",
+          // shared-read
+          " \"LBC\".\"HL_LOCK_TYPE\"='r' AND \"LS\".\"HL_LOCK_TYPE\"='e'" +
+          " AND NOT (\"LBC\".\"HL_TABLE\" IS NULL AND \"LS\".\"HL_TABLE\" IS NOT NULL)",
+      };

-      LOG.debug("Going to execute query <" + query.toString() + ">");
       stmt = dbConn.createStatement();
-      rs = stmt.executeQuery(query.toString());
-      SortedSet<LockInfo> lockSet = new TreeSet<>(new LockInfoComparator());
-      while (rs.next()) {
-        lockSet.add(new LockInfo(rs));
-      }
-      // Turn the tree set into an array so we can move back and forth easily
-      // in it.
-      LockInfo[] locks = lockSet.toArray(new LockInfo[lockSet.size()]);
-      if(LOG.isTraceEnabled()) {
-        LOG.trace("Locks to check(full): ");
-        for(LockInfo info : locks) {
-          LOG.trace("  " + info);
-        }
-      }
+      String query;

-      for (LockInfo info : locksBeingChecked) {
-        // If we've found it and it's already been marked acquired,
-        // then just look at the other locks.
-        if (info.state == LockState.ACQUIRED) {
-          /**this is what makes this method @SafeToRetry*/
-          continue;
-        }
+      if (dbProduct == MYSQL) {
+        List<String> subQuery = new ArrayList<>();
+        for (String cond : whereStr) {
+          subQuery.add("(" + sqlGenerator.addLimitClause(1, queryStr + cond) + ")");
+        }
+        query = String.join(" UNION ALL ", subQuery);
+      } else {
+        query = sqlGenerator.addLimitClause(1, queryStr + String.join(" OR ", whereStr));
+      }

-        // Look at everything in front of this lock to see if it should block
-        // it or not.
-        for (int i = locks.length - 1; i >= 0; i--) {
-          // Check if we're operating on the same database, if not, move on
-          if (!info.db.equals(locks[i].db)) {
-            continue;
-          }
+      LOG.debug("Going to execute query <" + query + ">");
+      rs = stmt.executeQuery(query);

-          // If table is null on either of these, then they are claiming to
-          // lock the whole database and we need to check it.  Otherwise,
-          // check if they are operating on the same table, if not, move on.
-          if (info.table != null && locks[i].table != null
-              && !info.table.equals(locks[i].table)) {
-            continue;
-          }
-          // if here, we may be checking a DB level lock against a Table level lock.  Alternatively,
-          // we could have used Intention locks (for example a request for S lock on table would
-          // cause an IS lock DB that contains the table).  Similarly, at partition level.
-
-          // If partition is null on either of these, then they are claiming to
-          // lock the whole table and we need to check it.  Otherwise,
-          // check if they are operating on the same partition, if not, move on.
-          if (info.partition != null && locks[i].partition != null
-              && !info.partition.equals(locks[i].partition)) {
-            continue;
-          }
+      if (rs.next()) {
+        Long blockedByExtId = rs.getLong(1);
+        Long blockedByIntId = rs.getLong(2);
+        Long intLockId = rs.getLong(8);

-          // We've found something that matches what we're trying to lock,
-          // so figure out if we can lock it too.
-          LockAction lockAction = jumpTable.get(info.type).get(locks[i].type).get(locks[i].state);
-          LOG.debug("desired Lock: " + info + " checked Lock: " + locks[i] + " action: " + lockAction);
-          switch (lockAction) {
-            case WAIT:
-              if(!ignoreConflict(info, locks[i])) {
-                /*we acquire all locks for a given query atomically; if 1 blocks, all go into (remain) in
-                 * Waiting state.  wait() will undo any 'acquire()' which may have happened as part of
-                 * this (metastore db) transaction and then we record which lock blocked the lock
-                 * we were testing ('info').*/
-                wait(dbConn, save);
-                String sqlText = "UPDATE \"HIVE_LOCKS\"" +
-                    " SET \"HL_BLOCKEDBY_EXT_ID\"=" + locks[i].extLockId +
-                    ", \"HL_BLOCKEDBY_INT_ID\"=" + locks[i].intLockId +
-                    " WHERE \"HL_LOCK_EXT_ID\"=" + info.extLockId + " AND \"HL_LOCK_INT_ID\"=" + info.intLockId;
-                LOG.debug("Executing sql: " + sqlText);
-                int updCnt = stmt.executeUpdate(sqlText);
-                if(updCnt != 1) {
-                  shouldNeverHappen(info.txnId, info.extLockId, info.intLockId);
-                }
-                LOG.debug("Going to commit");
-                dbConn.commit();
-                response.setState(LockState.WAITING);
-                LOG.debug("Lock(" + info + ") waiting for Lock(" + locks[i] + ")");
-                return response;
-              }
-              //fall through to ACQUIRE
-            case ACQUIRE:
-              break;
-            case KEEP_LOOKING:
-              continue;
-          }
-          //if we got here, it means it's ok to acquire 'info' lock
-          break;// so exit the loop and check next lock
-        }
-      }
+        String sqlText = "UPDATE \"HIVE_LOCKS\"" +
+            " SET \"HL_BLOCKEDBY_EXT_ID\"=" + blockedByExtId + ", \"HL_BLOCKEDBY_INT_ID\"=" + blockedByIntId +
+            " WHERE \"HL_LOCK_EXT_ID\"=" + extLockId + " AND \"HL_LOCK_INT_ID\"=" + intLockId;
+
+        LOG.debug("Executing sql: " + sqlText);
+        int updCnt = stmt.executeUpdate(sqlText);
+
+        if (updCnt != 1) {
+          shouldNeverHappen(txnId, extLockId, 0);
+        }
+        LOG.debug("Going to commit");
+        dbConn.commit();
+
+        response.setState(LockState.WAITING);
+        LOG.debug("Lock (EXT_ID:{}, INT_ID:{}) is blocked by (EXT_ID:{}, INT_ID:{})",
+            extLockId, intLockId, blockedByExtId, blockedByIntId);
+        return response;
+      }

       //if here, there were no locks that blocked any locks in 'locksBeingChecked' - acquire them all
       acquire(dbConn, stmt, locksBeingChecked);
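
To see what the new single-query conflict check actually sends to the database, the sketch below assembles the same statement outside of TxnHandler for a hypothetical extLockId of 42 and prints it; "FETCH FIRST 1 ROWS ONLY" merely stands in for whatever sqlGenerator.addLimitClause() emits on the dialect in use. In the result set, columns 1 and 2 are the blocker's HL_LOCK_EXT_ID/HL_LOCK_INT_ID from "LS", and column 8 is the waiting lock's HL_LOCK_INT_ID from "LBC" — which is why checkLock() reads rs.getLong(1), rs.getLong(2) and rs.getLong(8) above.

    // Standalone sketch (not TxnHandler itself): build the conflict-detection
    // query the way the new checkLock() does, and print it.
    public class CheckLockQueryDemo {
      public static void main(String[] args) {
        long extLockId = 42;  // hypothetical lock set being checked
        String queryStr = " * FROM (" +
            // "LS": every lock enqueued before ours
            " SELECT \"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", \"HL_DB\", \"HL_TABLE\"," +
            " \"HL_PARTITION\", \"HL_LOCK_STATE\", \"HL_LOCK_TYPE\" FROM \"HIVE_LOCKS\"" +
            " WHERE \"HL_LOCK_EXT_ID\" < " + extLockId + ") \"LS\"" +
            " INNER JOIN (" +
            // "LBC": the lock components being checked, i.e. our own rows
            " SELECT \"HL_LOCK_INT_ID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\"," +
            " \"HL_LOCK_TYPE\" FROM \"HIVE_LOCKS\"" +
            " WHERE \"HL_LOCK_EXT_ID\" = " + extLockId + ") \"LBC\"" +
            // overlap test: same db, and table/partition equal or NULL (NULL = whole scope)
            " ON \"LS\".\"HL_DB\" = \"LBC\".\"HL_DB\"" +
            " AND (\"LS\".\"HL_TABLE\" IS NULL OR \"LBC\".\"HL_TABLE\" IS NULL" +
            " OR \"LS\".\"HL_TABLE\" = \"LBC\".\"HL_TABLE\"" +
            " AND (\"LS\".\"HL_PARTITION\" IS NULL OR \"LBC\".\"HL_PARTITION\" IS NULL" +
            " OR \"LS\".\"HL_PARTITION\" = \"LBC\".\"HL_PARTITION\"))" +
            " WHERE ";
        String[] whereStr = {
            // our exclusive waits behind any overlapping lock, except that a
            // table-level 'e' ignores a db-level 'r'
            " \"LBC\".\"HL_LOCK_TYPE\"='e'" +
            " AND NOT (\"LBC\".\"HL_TABLE\" IS NOT NULL AND \"LS\".\"HL_TABLE\" IS NULL" +
            " AND \"LS\".\"HL_LOCK_TYPE\"='r')",
            // our shared-write waits behind any earlier write or exclusive
            " \"LBC\".\"HL_LOCK_TYPE\"='w' AND \"LS\".\"HL_LOCK_TYPE\" IN ('w','e')",
            // our shared-read waits only behind an exclusive, and a db-level 'r'
            // ignores a narrower table-level 'e'
            " \"LBC\".\"HL_LOCK_TYPE\"='r' AND \"LS\".\"HL_LOCK_TYPE\"='e'" +
            " AND NOT (\"LBC\".\"HL_TABLE\" IS NULL AND \"LS\".\"HL_TABLE\" IS NOT NULL)",
        };
        System.out.println("SELECT" + queryStr
            + String.join(" OR ", whereStr) + " FETCH FIRST 1 ROWS ONLY");
      }
    }
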
@@ -4565,12 +4459,10 @@ private LockResponse checkLock(Connection dbConn, long extLockId)
       response.setState(LockState.ACQUIRED);
     } finally {
       close(rs, stmt, null);
-      if(handle != null) {
-        handle.releaseLocks();
-      }
     }
     return response;
   }
+
   private void acquire(Connection dbConn, Statement stmt, List<LockInfo> locksBeingChecked)
       throws SQLException, NoSuchLockException, MetaException {
     if(locksBeingChecked == null || locksBeingChecked.isEmpty()) {
@@ -4614,47 +4506,6 @@ private void acquire(Connection dbConn, Statement stmt, List<LockInfo> locksBeingChecked)
     }
   }

-  /**
-   * the {@link #jumpTable} only deals with LockState/LockType.  In some cases it's not
-   * sufficient.  For example, an EXCLUSIVE lock on partition should prevent SHARED_READ
-   * on the table, but there is no reason for EXCLUSIVE on a table to prevent SHARED_READ
-   * on a database.  Similarly, EXCLUSIVE on a partition should not conflict with SHARED_READ on
-   * a database.  (SHARED_READ is usually acquired on a database to make sure it's not dropped
-   * while some operation is performed on that db (e.g. show tables, created table, etc)
-   * EXCLUSIVE on an object may mean it's being dropped or overwritten (for non-acid tables,
-   * an Insert uses EXCLUSIVE as well)).
-   */
-  private boolean ignoreConflict(LockInfo desiredLock, LockInfo existingLock) {
-    return
-        ((desiredLock.isDbLock() && desiredLock.type == LockType.SHARED_READ &&
-            existingLock.isTableLock() && existingLock.type == LockType.EXCLUSIVE) ||
-        (existingLock.isDbLock() && existingLock.type == LockType.SHARED_READ &&
-            desiredLock.isTableLock() && desiredLock.type == LockType.EXCLUSIVE) ||
-
-        (desiredLock.isDbLock() && desiredLock.type == LockType.SHARED_READ &&
-            existingLock.isPartitionLock() && existingLock.type == LockType.EXCLUSIVE) ||
-        (existingLock.isDbLock() && existingLock.type == LockType.SHARED_READ &&
-            desiredLock.isPartitionLock() && desiredLock.type == LockType.EXCLUSIVE))
-        ||
-
-        //different locks from same txn should not conflict with each other
-        (desiredLock.txnId != 0 && desiredLock.txnId == existingLock.txnId) ||
-        //txnId=0 means it's a select or IUD which does not write to ACID table, e.g
-        //insert overwrite table T partition(p=1) select a,b from T and autoCommit=true
-        // todo: fix comment as of HIVE-14988
-        (desiredLock.txnId == 0 && desiredLock.extLockId == existingLock.extLockId);
-  }
-
-  private void wait(Connection dbConn, Savepoint save) throws SQLException {
-    // Need to rollback because we did a select that acquired locks but we didn't
-    // actually update anything.  Also, we may have locked some locks as
-    // acquired that we now want to not acquire.  It's ok to rollback because
-    // once we see one wait, we're done, we won't look for more.
-    // Only rollback to savepoint because we want to commit our heartbeat
-    // changes.
-    LOG.debug("Going to rollback to savepoint");
-    dbConn.rollback(save);
-  }
   /**
    * Heartbeats on the lock table.  This commits, so do not enter it with any state.
    * Should not be called on a lock that belongs to transaction.
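
The deleted wait() relied on JDBC savepoints: roll back only the tentative lock-state changes while keeping earlier work in the same transaction (such as heartbeats) committable. For readers unfamiliar with the pattern, a minimal sketch against an in-memory Derby database (table name illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Savepoint;
    import java.sql.Statement;

    public class SavepointDemo {
      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:demo;create=true")) {
          conn.setAutoCommit(false);
          try (Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE T (A INT)");
            conn.commit();
            stmt.execute("INSERT INTO T VALUES (1)");  // e.g. a heartbeat we want to keep
            Savepoint save = conn.setSavepoint();
            stmt.execute("INSERT INTO T VALUES (2)");  // e.g. a tentative lock acquisition
            conn.rollback(save);                       // undo only the tentative work
            conn.commit();                             // the first insert survives
          }
        }
      }
    }
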
@@ -5134,114 +4985,6 @@ private static synchronized DataSource setupJdbcConnectionPool(Configuration conf
     }
   }

-  private static synchronized void buildJumpTable() {
-    if (jumpTable != null) return;
-
-    jumpTable = new HashMap<>(3);
-
-    // SR: Lock we are trying to acquire is shared read
-    Map<LockType, Map<LockState, LockAction>> m = new HashMap<>(3);
-    jumpTable.put(LockType.SHARED_READ, m);
-
-    // SR.SR: Lock we are examining is shared read
-    Map<LockState, LockAction> m2 = new HashMap<>(2);
-    m.put(LockType.SHARED_READ, m2);
-
-    // SR.SR.acquired Lock we are examining is acquired;  We can acquire
-    // because two shared reads can acquire together and there must be
-    // nothing in front of this one to prevent acquisition.
-    m2.put(LockState.ACQUIRED, LockAction.ACQUIRE);
-
-    // SR.SR.wait Lock we are examining is waiting.  In this case we keep
-    // looking, as it's possible that something in front is blocking it or
-    // that the other locker hasn't checked yet and he could lock as well.
-    m2.put(LockState.WAITING, LockAction.KEEP_LOOKING);
-
-    // SR.SW: Lock we are examining is shared write
-    m2 = new HashMap<>(2);
-    m.put(LockType.SHARED_WRITE, m2);
-
-    // SR.SW.acquired Lock we are examining is acquired;  We can acquire
-    // because a read can share with a write, and there must be
-    // nothing in front of this one to prevent acquisition.
-    m2.put(LockState.ACQUIRED, LockAction.ACQUIRE);
-
-    // SR.SW.wait Lock we are examining is waiting.  In this case we keep
-    // looking, as it's possible that something in front is blocking it or
-    // that the other locker hasn't checked yet and he could lock as well or
-    // that something is blocking it that would not block a read.
-    m2.put(LockState.WAITING, LockAction.KEEP_LOOKING);
-
-    // SR.E: Lock we are examining is exclusive
-    m2 = new HashMap<>(2);
-    m.put(LockType.EXCLUSIVE, m2);
-
-    // No matter whether it has acquired or not, we cannot pass an exclusive.
-    m2.put(LockState.ACQUIRED, LockAction.WAIT);
-    m2.put(LockState.WAITING, LockAction.WAIT);
-
-    // SW: Lock we are trying to acquire is shared write
-    m = new HashMap<>(3);
-    jumpTable.put(LockType.SHARED_WRITE, m);
-
-    // SW.SR: Lock we are examining is shared read
-    m2 = new HashMap<>(2);
-    m.put(LockType.SHARED_READ, m2);
-
-    // SW.SR.acquired Lock we are examining is acquired;  We need to keep
-    // looking, because there may or may not be another shared write in front
-    // that would block us.
-    m2.put(LockState.ACQUIRED, LockAction.KEEP_LOOKING);
-
-    // SW.SR.wait Lock we are examining is waiting.  In this case we keep
-    // looking, as it's possible that something in front is blocking it or
-    // that the other locker hasn't checked yet and he could lock as well.
-    m2.put(LockState.WAITING, LockAction.KEEP_LOOKING);
-
-    // SW.SW: Lock we are examining is shared write
-    m2 = new HashMap<>(2);
-    m.put(LockType.SHARED_WRITE, m2);
-
-    // Regardless of acquired or waiting, one shared write cannot pass another.
-    m2.put(LockState.ACQUIRED, LockAction.WAIT);
-    m2.put(LockState.WAITING, LockAction.WAIT);
-
-    // SW.E: Lock we are examining is exclusive
-    m2 = new HashMap<>(2);
-    m.put(LockType.EXCLUSIVE, m2);
-
-    // No matter whether it has acquired or not, we cannot pass an exclusive.
-    m2.put(LockState.ACQUIRED, LockAction.WAIT);
-    m2.put(LockState.WAITING, LockAction.WAIT);
-
-    // E: Lock we are trying to acquire is exclusive
-    m = new HashMap<>(3);
-    jumpTable.put(LockType.EXCLUSIVE, m);
-
-    // E.SR: Lock we are examining is shared read
-    m2 = new HashMap<>(2);
-    m.put(LockType.SHARED_READ, m2);
-
-    // Exclusives can never pass
-    m2.put(LockState.ACQUIRED, LockAction.WAIT);
-    m2.put(LockState.WAITING, LockAction.WAIT);
-
-    // E.SW: Lock we are examining is shared write
-    m2 = new HashMap<>(2);
-    m.put(LockType.SHARED_WRITE, m2);
-
-    // Exclusives can never pass
-    m2.put(LockState.ACQUIRED, LockAction.WAIT);
-    m2.put(LockState.WAITING, LockAction.WAIT);
-
-    // E.E: Lock we are examining is exclusive
-    m2 = new HashMap<>(2);
-    m.put(LockType.EXCLUSIVE, m2);
-
-    // No matter whether it has acquired or not, we cannot pass an exclusive.
-    m2.put(LockState.ACQUIRED, LockAction.WAIT);
-    m2.put(LockState.WAITING, LockAction.WAIT);
-  }
   /**
    * Returns true if {@code ex} should be retried
    */
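
The removed buildJumpTable() encoded a 3x3x2 compatibility matrix (desired type x existing type x existing state). As a cross-check against the three whereStr branches earlier in this patch, the same matrix can be restated as a pure function; this is a hedged sketch with the enums redeclared locally, not the metastore's own types:

    public class LockMatrixDemo {
      enum LockType { SHARED_READ, SHARED_WRITE, EXCLUSIVE }
      enum LockState { ACQUIRED, WAITING }
      enum LockAction { ACQUIRE, WAIT, KEEP_LOOKING }

      // Same outcomes the jumpTable produced: desired lock vs. an earlier lock.
      static LockAction check(LockType desired, LockType existing, LockState state) {
        if (existing == LockType.EXCLUSIVE || desired == LockType.EXCLUSIVE) {
          return LockAction.WAIT;          // nothing passes an exclusive, in either direction
        }
        if (desired == LockType.SHARED_WRITE && existing == LockType.SHARED_WRITE) {
          return LockAction.WAIT;          // one shared write cannot pass another
        }
        if (desired == LockType.SHARED_WRITE && existing == LockType.SHARED_READ) {
          return LockAction.KEEP_LOOKING;  // a read never blocks a write outright
        }
        // desired is SHARED_READ vs. an earlier SHARED_READ or SHARED_WRITE
        return state == LockState.ACQUIRED ? LockAction.ACQUIRE : LockAction.KEEP_LOOKING;
      }

      public static void main(String[] args) {
        System.out.println(check(LockType.SHARED_READ, LockType.EXCLUSIVE, LockState.WAITING));    // WAIT
        System.out.println(check(LockType.SHARED_WRITE, LockType.SHARED_READ, LockState.ACQUIRED)); // KEEP_LOOKING
      }
    }

Note how the SQL branches keep only the WAIT cells of this matrix — 'w' waits behind 'w' or 'e', 'r' waits behind 'e', 'e' waits behind everything — with the db-vs-table scope exceptions that ignoreConflict() used to carve out. The lock state column drops out entirely, because ACQUIRE and KEEP_LOOKING both mean "not a blocker" when the query only asks for the first blocking row.
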
diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
index 482d8aa163..55b3b56352 100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@ -582,6 +582,7 @@ CREATE TABLE HIVE_LOCKS (
 );

 CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+CREATE INDEX HL_DTP_INDEX ON HIVE_LOCKS (HL_DB, HL_TABLE, HL_PARTITION);

 CREATE TABLE NEXT_LOCK_ID (
   NL_NEXT bigint NOT NULL
diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
index 6fd8ae64cb..0adbdfde4b 100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
@@ -56,6 +56,8 @@ ALTER TABLE COMPLETED_COMPACTIONS ADD CC_ERROR_MESSAGE clob;
 ALTER TABLE "APP"."KEY_CONSTRAINTS" DROP CONSTRAINT "CONSTRAINTS_PK";
 ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY ("PARENT_TBL_ID", "CONSTRAINT_NAME", "POSITION");

+CREATE INDEX HL_DTP_INDEX ON HIVE_LOCKS (HL_DB, HL_TABLE, HL_PARTITION);
+
 -- This needs to be the last thing done.  Insert any changes above this line.
 UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
index dfd4bd6672..dfcdc13ce8 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
@@ -1077,6 +1077,8 @@ PRIMARY KEY CLUSTERED
 )
 );

+CREATE INDEX HL_DTP_IDX ON HIVE_LOCKS (HL_DB, HL_TABLE, HL_PARTITION);
+
 CREATE TABLE NEXT_COMPACTION_QUEUE_ID(
     NCQ_NEXT bigint NOT NULL
 );
diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
index fecfca8c13..290682767d 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
@@ -30,6 +30,8 @@ ALTER TABLE COMPLETED_COMPACTIONS ADD CC_ERROR_MESSAGE varchar(max) NULL;
 ALTER TABLE KEY_CONSTRAINTS DROP CONSTRAINT CONSTRAINTS_PK;
 ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (PARENT_TBL_ID, CONSTRAINT_NAME, POSITION);

+CREATE INDEX HL_DTP_IDX ON HIVE_LOCKS (HL_DB, HL_TABLE, HL_PARTITION);
+
 -- These lines need to be last.  Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE;
diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
index a9a09307ee..6e89de1ef6 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
@@ -1047,10 +1047,11 @@ CREATE TABLE HIVE_LOCKS (
   HL_BLOCKEDBY_EXT_ID bigint,
   HL_BLOCKEDBY_INT_ID bigint,
-  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID),
-  KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID)
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;

 CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID);
+CREATE INDEX HL_DTP_IDX ON HIVE_LOCKS (HL_DB, HL_TABLE, HL_PARTITION) USING BTREE;
+CREATE INDEX HL_LOCK_TYPE_IDX ON HIVE_LOCKS (HL_LOCK_TYPE);

 CREATE TABLE NEXT_LOCK_ID (
   NL_NEXT bigint NOT NULL
diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
index 70b5d9d41f..b918b164bc 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
@@ -60,6 +60,9 @@ ALTER TABLE COMPLETED_COMPACTIONS ADD CC_ERROR_MESSAGE mediumtext;
 ALTER TABLE `KEY_CONSTRAINTS` DROP PRIMARY KEY;
 ALTER TABLE `KEY_CONSTRAINTS` ADD CONSTRAINT `CONSTRAINTS_PK` PRIMARY KEY (`PARENT_TBL_ID`, `CONSTRAINT_NAME`, `POSITION`);

+CREATE INDEX HL_DTP_IDX ON HIVE_LOCKS (HL_DB, HL_TABLE, HL_PARTITION) USING BTREE;
+CREATE INDEX HL_LOCK_TYPE_IDX ON HIVE_LOCKS (HL_LOCK_TYPE);
+
 -- These lines need to be last.  Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE;
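
MySQL is the only dialect that also gains HL_LOCK_TYPE_IDX, and the only one for which checkLock() builds the UNION ALL shape — presumably so each conflict rule becomes its own LIMIT 1 subquery that can use these indexes independently, instead of one large OR disjunction. A sketch of that string assembly (addLimitClause() below is a simplified stand-in for SQLGenerator's dialect-aware method; the placeholder rule names are illustrative):

    import java.util.ArrayList;
    import java.util.List;

    public class MySqlUnionDemo {
      // Stand-in for sqlGenerator.addLimitClause(numRows, noSelectQuery) on MySQL.
      static String addLimitClause(int numRows, String noSelectQuery) {
        return "SELECT" + noSelectQuery + " LIMIT " + numRows;
      }

      public static void main(String[] args) {
        String queryStr = " * FROM ... WHERE ";  // elided: same join as in checkLock()
        String[] whereStr = {" <rule-e> ", " <rule-w> ", " <rule-r> "};

        List<String> subQuery = new ArrayList<>();
        for (String cond : whereStr) {
          subQuery.add("(" + addLimitClause(1, queryStr + cond) + ")");
        }
        // => (SELECT * FROM ... <rule-e> LIMIT 1) UNION ALL (... <rule-w> ...) UNION ALL (...)
        System.out.println(String.join(" UNION ALL ", subQuery));
      }
    }
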
diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
index f90d76b4e0..2b598411cf 100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
@@ -1032,6 +1032,7 @@ CREATE TABLE HIVE_LOCKS (
 ) ROWDEPENDENCIES;

 CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+CREATE INDEX HL_DTP_INDEX ON HIVE_LOCKS (HL_DB, HL_TABLE, HL_PARTITION);

 CREATE TABLE NEXT_LOCK_ID (
   NL_NEXT NUMBER(19) NOT NULL
diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
index 9f1b980372..221e2c2043 100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
@@ -60,6 +60,8 @@ ALTER TABLE COMPLETED_COMPACTIONS ADD CC_ERROR_MESSAGE CLOB;
 ALTER TABLE KEY_CONSTRAINTS DROP CONSTRAINT CONSTRAINTS_PK;
 ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (PARENT_TBL_ID, CONSTRAINT_NAME, POSITION);

+CREATE INDEX HL_DTP_INDEX ON HIVE_LOCKS (HL_DB, HL_TABLE, HL_PARTITION);
+
 -- These lines need to be last.  Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS Status from dual;
diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
index b992d7a249..83c741ded8 100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@ -1718,6 +1718,7 @@ CREATE TABLE "HIVE_LOCKS" (
 );

 CREATE INDEX HL_TXNID_INDEX ON "HIVE_LOCKS" USING hash ("HL_TXNID");
+CREATE INDEX HL_DTP_INDEX ON "HIVE_LOCKS" USING btree ("HL_DB", "HL_TABLE", "HL_PARTITION");

 CREATE TABLE "NEXT_LOCK_ID" (
   "NL_NEXT" bigint NOT NULL
diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
index f9af248e27..e2266493c0 100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
@@ -191,6 +191,8 @@ ALTER TABLE "COMPLETED_COMPACTIONS" ADD "CC_ERROR_MESSAGE" text;
 ALTER TABLE "KEY_CONSTRAINTS" DROP CONSTRAINT "KEY_CONSTRAINTS_pkey";
 ALTER TABLE "KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY ("PARENT_TBL_ID", "CONSTRAINT_NAME", "POSITION");

+CREATE INDEX HL_DTP_INDEX ON "HIVE_LOCKS" USING btree ("HL_DB", "HL_TABLE", "HL_PARTITION");
+
 -- These lines need to be last.  Insert any changes above.
 UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1;
 SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0';