diff --git ql/src/java/org/apache/TxnHandlerBenchRunner.java ql/src/java/org/apache/TxnHandlerBenchRunner.java new file mode 100644 index 0000000000..6a8a797a76 --- /dev/null +++ ql/src/java/org/apache/TxnHandlerBenchRunner.java @@ -0,0 +1,133 @@ +package org.apache; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.DatabaseProduct; +import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; +import org.apache.hadoop.hive.metastore.txn.TxnStore; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configuration; +import org.openjdk.jmh.annotations.*; +import org.openjdk.jmh.runner.Runner; +import org.openjdk.jmh.runner.RunnerException; +import org.openjdk.jmh.runner.options.Options; +import org.openjdk.jmh.runner.options.OptionsBuilder; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; + +@BenchmarkMode(Mode.SingleShotTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5) +@Measurement(iterations = 10) +public class TxnHandlerBenchRunner { + + @Param({"MYSQL", "POSTGRES", "SQLSERVER"}) + private String dbProduct; + @Param({"4"}) + private String numTables; + @Param({"300"}) + private String numPartitions; + @Param({"true", "false"}) + private String isContented; + + private TxnStore txnHandler; + private Map results = new ConcurrentHashMap<>(); + + public static void main(String[] args) throws RunnerException { + Options opt = new OptionsBuilder() + .include(TxnHandlerBenchRunner.class.getSimpleName()) + .syncIterations(true) + .threads(10) + .forks(1) + .build(); + new Runner(opt).run(); + } + @Setup + public void setup() throws MetaException { + 
System.out.println("*** Running SETUP method. ***"); + HiveConf conf = new HiveConf(); + switch (DatabaseProduct.valueOf(dbProduct)){ + case MYSQL: { + conf.set("javax.jdo.option.ConnectionURL", "jdbc:mysql://localhost:6605/upstream_hive?allowMultiQueries=true&allowPublicKeyRetrieval=true&useSSL=false"); + conf.set("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver"); + conf.set("javax.jdo.option.ConnectionUserName", "upstream_hive"); + conf.set("javax.jdo.option.ConnectionPassword", "hive"); + } break; + case ORACLE: { + conf.set("javax.jdo.option.ConnectionURL", "jdbc:oracle:thin:@localhost:1521:xe"); + conf.set("javax.jdo.option.ConnectionDriverName", "oracle.jdbc.OracleDriver"); + conf.set("javax.jdo.option.ConnectionUserName", "system"); + conf.set("javax.jdo.option.ConnectionPassword", "oracle"); + } break; + case POSTGRES: { + conf.set("javax.jdo.option.ConnectionURL", "jdbc:postgresql://localhost:54320/hive?reWriteBatchedInserts=true"); + conf.set("javax.jdo.option.ConnectionDriverName", "org.postgresql.Driver"); + conf.set("javax.jdo.option.ConnectionUserName", "hive"); + conf.set("javax.jdo.option.ConnectionPassword", "hive"); + break; + } + case SQLSERVER: { + conf.set("javax.jdo.option.ConnectionURL", "jdbc:sqlserver://localhost:1433;databaseName=hive"); + conf.set("javax.jdo.option.ConnectionDriverName", "com.microsoft.sqlserver.jdbc.SQLServerDriver"); + conf.set("javax.jdo.option.ConnectionUserName", "sa"); + conf.set("javax.jdo.option.ConnectionPassword", "hive-Test123"); + break; + } + } + conf.set("hikaricp.connectionTimeout", "180000"); + conf.set("metastore.direct.sql.max.elements.values.clause", "1200"); + + TxnDbUtil.setConfValues(conf); + txnHandler = TxnUtils.getTxnStore(conf); + + LoggerContext ctx = (LoggerContext) LogManager.getContext(false); + Configuration ctxConfig = ctx.getConfiguration(); + ctxConfig.getLoggerConfig(TxnHandlerBenchRunner.class.getName()).setLevel(Level.INFO); + ctx.updateLoggers(ctxConfig); + } + + 
@TearDown(org.openjdk.jmh.annotations.Level.Iteration) + public void clearResults() { + System.out.println(results); + results.clear(); + } + + @Benchmark + public void acquireLock() throws Exception { + List lockComponents = getLockComponents(); + OpenTxnRequest rqst = new OpenTxnRequest(1, "me", "localhost"); + OpenTxnsResponse rsp = txnHandler.openTxns(rqst); + LockRequest request = new LockRequest(lockComponents, "me", "localhost"); + request.setTxnid(rsp.getTxn_ids().get(0)); + LockResponse resp = txnHandler.lock(request); + if (results.containsKey(resp.getState())) { + results.put(resp.getState(), results.get(resp.getState()) + 1); + } else { + results.put(resp.getState(), 1); + } + txnHandler.commitTxn(new CommitTxnRequest(request.getTxnid())); + } + + private List getLockComponents() { + List lockComponents = new ArrayList<>(); + long threadId = Boolean.parseBoolean(isContented) ? -1L : Thread.currentThread().getId(); + for (int i = 0; i < Integer.parseInt(numTables); i++) { + for (int j = 0; j < Integer.parseInt(numPartitions); j++) { + LockComponent comp = new LockComponent(i % 5 < 4 ? LockType.SHARED_READ : LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); + comp.setTablename("store_sales_" + (i + 1) + "_" + threadId); + comp.setPartitionname("p_" + (j + 1) + "_" + threadId); + comp.setOperationType(i % 5 < 4 ? 
DataOperationType.SELECT : DataOperationType.UPDATE); + lockComponents.add(comp); + } + } + return lockComponents; + } +} diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java index 49b737ecf9..fdcb6672f3 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java @@ -30,8 +30,10 @@ import java.sql.PreparedStatement; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.stream.Collectors; /** * Helper class that generates SQL queries with syntax specific to target DB @@ -292,4 +294,25 @@ public String addEscapeCharacters(String s) { return s; } + public String getDefaultValuesClause() { + if (dbProduct == DatabaseProduct.SQLSERVER) { + return "DEFAULT VALUES"; + } else { + return "VALUES(DEFAULT)"; + } + } + + /** + * Please note that for Derby, all database fields in 'parts' must be textual values. + * If you need to concatenate numbers, you should supply them with casting, e.g. TRIM(CHAR("TXN_ID")) + * @param parts sequence of elements to be concatenated together + * @return dbProduct-specific concatenation construct involving the 'parts' + */ + public String concat(String... 
parts) { + if (dbProduct == DatabaseProduct.DERBY || dbProduct == DatabaseProduct.ORACLE) { + return String.join(" || ", parts); + } else { + return "CONCAT(" + String.join(", ", parts) + ")"; + } + } } diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index b43ff3986c..0bceb8e118 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -195,8 +195,6 @@ private static final String TXN_COMPONENTS_INSERT_QUERY = "INSERT INTO \"TXN_COMPONENTS\" (" + "\"TC_TXNID\", \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", \"TC_OPERATION_TYPE\", \"TC_WRITEID\")" + " VALUES (?, ?, ?, ?, ?, ?)"; - private static final String INCREMENT_NEXT_LOCK_ID_QUERY = "UPDATE \"NEXT_LOCK_ID\" SET \"NL_NEXT\" = %s"; - private static final String UPDATE_HIVE_LOCKS_EXT_ID_QUERY = "UPDATE \"HIVE_LOCKS\" SET \"HL_LOCK_EXT_ID\" = %s WHERE \"HL_LOCK_EXT_ID\" = %s"; private static final String SELECT_WRITE_ID_QUERY = "SELECT \"T2W_WRITEID\" FROM \"TXN_TO_WRITE_ID\" WHERE" + " \"T2W_DATABASE\" = ? AND \"T2W_TABLE\" = ? 
AND \"T2W_TXNID\" = ?"; private static final String COMPL_TXN_COMPONENTS_INSERT_QUERY = "INSERT INTO \"COMPLETED_TXN_COMPONENTS\" " + @@ -2318,11 +2316,16 @@ public long cleanupMaterializationRebuildLocks(ValidTxnList validTxnList, long t */ @RetrySemantics.CannotRetry public LockResponse lock(LockRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException { + long start = System.currentTimeMillis(); ConnectionLockIdPair connAndLockId = enqueueLockWithRetry(rqst); + LOG.info("EnqueueLock() took: " + (System.currentTimeMillis() - start)); try { - return checkLockWithRetry(connAndLockId.dbConn, connAndLockId.extLockId, rqst.getTxnid()); + start = System.currentTimeMillis(); + LockResponse resp = checkLockWithRetry(connAndLockId.dbConn, connAndLockId.extLockId, rqst.getTxnid()); + LOG.info("checkLock() took: " + (System.currentTimeMillis() - start)); + return resp; } - catch(NoSuchLockException e) { + catch (NoSuchLockException e) { // This should never happen, as we just added the lock id throw new MetaException("Couldn't find a lock we just created! " + e.getMessage()); } @@ -2391,19 +2394,10 @@ private ConnectionLockIdPair enqueueLockWithRetry(LockRequest rqst) throws NoSuc shouldNeverHappen(txnid); } } - /* Insert txn components and hive locks (with a temp extLockId) first, before getting the next lock ID in a select-for-update. - This should minimize the scope of the S4U and decrease the table lock duration. */ - insertTxnComponents(txnid, rqst, dbConn); - long tempExtLockId = insertHiveLocksWithTemporaryExtLockId(txnid, dbConn, rqst); - /** Get the next lock id. - * This has to be atomic with adding entries to HIVE_LOCK entries (1st add in W state) to prevent a race. - * Suppose ID gen is a separate txn and 2 concurrent lock() methods are running. 1st one generates nl_next=7, - * 2nd nl_next=8. Then 8 goes first to insert into HIVE_LOCKS and acquires the locks. 
Then 7 unblocks, - * and add it's W locks but it won't see locks from 8 since to be 'fair' {@link #checkLock(java.sql.Connection, long)} - * doesn't block on locks acquired later than one it's checking*/ - long extLockId = getNextLockIdForUpdate(dbConn, stmt); - incrementLockIdAndUpdateHiveLocks(stmt, extLockId, tempExtLockId); + insertTxnComponents(txnid, rqst, dbConn); + long extLockId = getNextLockId(dbConn, stmt); + insertHiveLocks(dbConn, rqst, extLockId); dbConn.commit(); success = true; @@ -2429,10 +2423,12 @@ private ConnectionLockIdPair enqueueLockWithRetry(LockRequest rqst) throws NoSuc } } - private long getNextLockIdForUpdate(Connection dbConn, Statement stmt) throws SQLException, MetaException { - String s = sqlGenerator.addForUpdateClause("SELECT \"NL_NEXT\" FROM \"NEXT_LOCK_ID\""); + private long getNextLockId(Connection dbConn, Statement stmt) throws SQLException, MetaException { + String s = "INSERT INTO \"NEXT_LOCK_ID\" " + sqlGenerator.getDefaultValuesClause(); LOG.debug("Going to execute query <" + s + ">"); - try (ResultSet rs = stmt.executeQuery(s)) { + String generatedColumns[] = { "NL_NEXT" }; + stmt.executeUpdate(s, generatedColumns); + try (ResultSet rs = stmt.getGeneratedKeys()) { if (!rs.next()) { LOG.debug("Going to rollback"); dbConn.rollback(); @@ -2443,16 +2439,6 @@ private long getNextLockIdForUpdate(Connection dbConn, Statement stmt) throws SQ } } - private void incrementLockIdAndUpdateHiveLocks(Statement stmt, long extLockId, long tempId) throws SQLException { - String incrCmd = String.format(INCREMENT_NEXT_LOCK_ID_QUERY, (extLockId + 1)); - // update hive locks entries with the real EXT_LOCK_ID (replace temp ID) - String updateLocksCmd = String.format(UPDATE_HIVE_LOCKS_EXT_ID_QUERY, extLockId, tempId); - LOG.debug("Going to execute updates in batch: <" + incrCmd + ">, and <" + updateLocksCmd + ">"); - stmt.addBatch(incrCmd); - stmt.addBatch(updateLocksCmd); - stmt.executeBatch(); - } - private void insertTxnComponents(long txnid, 
LockRequest rqst, Connection dbConn) throws SQLException { if (txnid > 0) { Map, Optional> writeIdCache = new HashMap<>(); @@ -2567,12 +2553,10 @@ private boolean shouldUpdateTxnComponent(long txnid, LockRequest rqst, LockCompo } } - private long insertHiveLocksWithTemporaryExtLockId(long txnid, Connection dbConn, LockRequest rqst) throws MetaException, SQLException { - - String lastHB = isValidTxn(txnid) ? "0" : TxnDbUtil.getEpochFn(dbProduct); + private void insertHiveLocks(Connection dbConn, LockRequest rqst, long extLockId) throws MetaException, SQLException { + String lastHB = isValidTxn(rqst.getTxnid()) ? "0" : TxnDbUtil.getEpochFn(dbProduct); String insertLocksQuery = String.format(HIVE_LOCKS_INSERT_QRY, lastHB); long intLockId = 0; - long tempExtLockId = generateTemporaryId(); try (PreparedStatement pstmt = dbConn.prepareStatement(insertLocksQuery)) { for (LockComponent lc : rqst.getComponent()) { @@ -2590,9 +2574,9 @@ private long insertHiveLocksWithTemporaryExtLockId(long txnid, Connection dbConn intLockId++; String lockType = LockTypeUtil.getEncodingAsStr(lc.getType()); - pstmt.setLong(1, tempExtLockId); + pstmt.setLong(1, extLockId); pstmt.setLong(2, intLockId); - pstmt.setLong(3, txnid); + pstmt.setLong(3, rqst.getTxnid()); pstmt.setString(4, normalizeCase(lc.getDbname())); pstmt.setString(5, normalizeCase(lc.getTablename())); pstmt.setString(6, normalizeCase(lc.getPartitionname())); @@ -2613,7 +2597,6 @@ private long insertHiveLocksWithTemporaryExtLockId(long txnid, Connection dbConn pstmt.executeBatch(); } } - return tempExtLockId; } private long generateTemporaryId() { @@ -4319,11 +4302,33 @@ private LockResponse checkLock(Connection dbConn, long extLockId, long txnId) close(rs, stmt, null); } + /** + * We use an S4U here to lock all rows in HIVE_LOCKS which contain our requested database components. 
+ * This helps prevent a race condition where a thread with a lower lock ID than ours arrives late - + * after we have already done our conflict-check (and succeeded) but not yet acquired the locks. If it wasn't for this S4U, + * the late-comer thread would not see a problem during its conflict-check either (since our lock ID is higher AND we haven't + * acquired our locks yet) and it would go ahead and acquire its locks. Once our thread gets the execution back, it would + * also acquire the same locks.. This S4U is meant to prevent this race condition. + * The scope of this lock is limited to only our set of requested components, as opposed to using a global lock on HIVE_LOCKS. + */ + long start = System.currentTimeMillis(); + lockComponentsForUpdate(dbConn, extLockId); + LOG.info("S4U took: " + (System.currentTimeMillis() - start)); + + /** + * We block either if we find a lock request which has a lower lock ID than us (to preserve fairness), + * or a higher lock ID which has already acquired at least one of the components that we have requested. + * This latter scenario can happen if a thread has retrieved its lock ID a bit later than us, + * but got here faster and already passed through conflict-check and acquired its locks. + * This does away with 100% fairness based on enqueue lock commit order, but increases concurrency while + * maintaining an acceptable level of fairness in the system. 
+ */ String queryStr = " \"EX\".*, \"REQ\".\"HL_LOCK_INT_ID\" AS \"REQ_LOCK_INT_ID\" FROM (" + " SELECT \"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", \"HL_TXNID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\"," + " \"HL_LOCK_STATE\", \"HL_LOCK_TYPE\" FROM \"HIVE_LOCKS\"" + - " WHERE \"HL_LOCK_EXT_ID\" < " + extLockId + ") \"EX\"" + + " WHERE \"HL_LOCK_EXT_ID\" < " + extLockId + + " OR (\"HL_LOCK_EXT_ID\" > " + extLockId + " AND \"HL_LOCK_STATE\" = '" + LOCK_ACQUIRED + "')) \"EX\"" + " INNER JOIN (" + " SELECT \"HL_LOCK_INT_ID\", \"HL_TXNID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\"," + " \"HL_LOCK_TYPE\" FROM \"HIVE_LOCKS\"" + @@ -4368,7 +4373,9 @@ is performed on that db (e.g. show tables, created table, etc). stmt = dbConn.createStatement(); LOG.debug("Going to execute query <" + query + ">"); + start = System.currentTimeMillis(); rs = stmt.executeQuery(query); + LOG.info("Conflict check took: " + (System.currentTimeMillis() - start)); if (rs.next()) { // We acquire all locks for a given query atomically; if 1 blocks, all remain in Waiting state. @@ -4395,7 +4402,9 @@ is performed on that db (e.g. show tables, created table, etc). return response; } // If here, there were no locks that would block any item from 'locksBeingChecked' - acquire them all + start = System.currentTimeMillis(); acquire(dbConn, stmt, locksBeingChecked); + LOG.info("Acquire() took: " + (System.currentTimeMillis() - start)); // We acquired all the locks, so commit and return acquired. LOG.debug("Going to commit"); @@ -4407,6 +4416,37 @@ is performed on that db (e.g. show tables, created table, etc). 
return response; } + private void lockComponentsForUpdate(Connection dbConn, long extLockId) throws MetaException, SQLException { + + String queryStr = + "SELECT \"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\" FROM " + + "(SELECT \"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", \"HL_LOCK_TYPE\", \"HL_TXNID\", " + + "\"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\" FROM \"HIVE_LOCKS\" WHERE \"HL_LOCK_STATE\" = 'w') \"EX\" " + + "INNER JOIN " + + "(SELECT \"HL_LOCK_TYPE\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", \"HL_TXNID\" FROM \"HIVE_LOCKS\"" + + " WHERE \"HL_LOCK_EXT_ID\" = " + extLockId + ") \"REQ\" " + + "ON \"EX\".\"HL_DB\" = \"REQ\".\"HL_DB\"" + + " AND (\"EX\".\"HL_TABLE\" IS NULL OR \"REQ\".\"HL_TABLE\" IS NULL" + + " OR (\"EX\".\"HL_TABLE\" = \"REQ\".\"HL_TABLE\"" + + " AND (\"EX\".\"HL_PARTITION\" IS NULL OR \"REQ\".\"HL_PARTITION\" IS NULL" + + " OR \"EX\".\"HL_PARTITION\" = \"REQ\".\"HL_PARTITION\"))) WHERE (\"REQ\".\"HL_TXNID\" = 0 OR \"EX\".\"HL_TXNID\" != \"REQ\".\"HL_TXNID\") AND " + + // exclusive + " ((\"REQ\".\"HL_LOCK_TYPE\"='e'" + + " AND NOT (\"EX\".\"HL_TABLE\" IS NULL AND \"EX\".\"HL_LOCK_TYPE\"='r' AND \"REQ\".\"HL_TABLE\" IS NOT NULL)) OR " + + // shared-write + " (\"REQ\".\"HL_LOCK_TYPE\"='w' AND \"EX\".\"HL_LOCK_TYPE\" IN ('w','e')) OR " + + // shared-read + " (\"REQ\".\"HL_LOCK_TYPE\"='r' AND \"EX\".\"HL_LOCK_TYPE\"='e'" + + " AND NOT (\"EX\".\"HL_TABLE\" IS NOT NULL AND \"REQ\".\"HL_TABLE\" IS NULL)))"; + + + String lockComponentsQuery = sqlGenerator.addForUpdateClause(queryStr); + LOG.debug("Going to execute select for update: <" + lockComponentsQuery + ">"); + try (Statement stmt = dbConn.createStatement()) { + stmt.executeQuery(lockComponentsQuery); + } + } + private void acquire(Connection dbConn, Statement stmt, List locksBeingChecked) throws SQLException, NoSuchLockException, MetaException { if(locksBeingChecked == null || locksBeingChecked.isEmpty()) { diff --git standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql 
standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql index 366b6f02c1..a08df17b4f 100644 --- standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql +++ standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql @@ -587,9 +587,8 @@ CREATE TABLE HIVE_LOCKS ( CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID); CREATE TABLE NEXT_LOCK_ID ( - NL_NEXT bigint NOT NULL + NL_NEXT bigint PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY ); -INSERT INTO NEXT_LOCK_ID VALUES(1); CREATE TABLE COMPACTION_QUEUE ( CQ_ID bigint PRIMARY KEY, diff --git standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql index 8a3cd56658..f35437b5af 100644 --- standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql +++ standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql @@ -68,5 +68,13 @@ ALTER TABLE "APP"."DBS" ADD COLUMN "DB_MANAGED_LOCATION_URI" VARCHAR(4000); ALTER TABLE COMPACTION_QUEUE ADD CQ_NEXT_TXN_ID bigint; DROP TABLE MIN_HISTORY_LEVEL; +-- HIVE-23236 +CREATE TABLE TMP_NEXT_LOCK_ID( + NL_NEXT bigint PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY +); +INSERT INTO TMP_NEXT_LOCK_ID (NL_NEXT) SELECT NL_NEXT FROM NEXT_LOCK_ID; +DROP TABLE NEXT_LOCK_ID; +RENAME TABLE TMP_NEXT_LOCK_ID TO NEXT_LOCK_ID; + -- This needs to be the last thing done. Insert any changes above this line. 
UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; diff --git standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql index 2e0177723d..dae0ede61b 100644 --- standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql +++ standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql @@ -1091,11 +1091,9 @@ CREATE TABLE NEXT_COMPACTION_QUEUE_ID( INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); CREATE TABLE NEXT_LOCK_ID( - NL_NEXT bigint NOT NULL + NL_NEXT bigint NOT NULL IDENTITY(1, 1) ); -INSERT INTO NEXT_LOCK_ID VALUES(1); - CREATE TABLE NEXT_TXN_ID( NTXN_NEXT bigint NOT NULL ); diff --git standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql index 9f3951575b..cb62396ae1 100644 --- standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql +++ standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql @@ -71,6 +71,16 @@ ALTER TABLE DBS ADD DB_MANAGED_LOCATION_URI nvarchar(4000); ALTER TABLE COMPACTION_QUEUE bigint CQ_NEXT_TXN_ID NOT NULL; DROP TABLE MIN_HISTORY_LEVEL; +-- HIVE-23236 +CREATE TABLE TMP_NEXT_LOCK_ID( + NL_NEXT bigint NOT NULL IDENTITY(1, 1) +); +SET IDENTITY_INSERT TMP_NEXT_LOCK_ID ON; +INSERT INTO TMP_NEXT_LOCK_ID (NL_NEXT) SELECT NL_NEXT FROM NEXT_LOCK_ID; +SET IDENTITY_INSERT TMP_NEXT_LOCK_ID OFF; +DROP TABLE NEXT_LOCK_ID; +EXEC sp_rename 'TMP_NEXT_LOCK_ID', 'NEXT_LOCK_ID'; + -- These lines need to be last. Insert any changes above. 
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE; diff --git standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql index 0512a45cad..48c294e47d 100644 --- standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql +++ standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql @@ -1054,9 +1054,8 @@ CREATE TABLE HIVE_LOCKS ( CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID); CREATE TABLE NEXT_LOCK_ID ( - NL_NEXT bigint NOT NULL + NL_NEXT bigint PRIMARY KEY AUTO_INCREMENT ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -INSERT INTO NEXT_LOCK_ID VALUES(1); CREATE TABLE COMPACTION_QUEUE ( CQ_ID bigint PRIMARY KEY, diff --git standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql index 4b82e36ab4..6f7438c3fc 100644 --- standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql +++ standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql @@ -72,6 +72,14 @@ ALTER TABLE DBS ADD COLUMN DB_MANAGED_LOCATION_URI VARCHAR(4000) CHARACTER SET l ALTER TABLE COMPACTION_QUEUE ADD CQ_NEXT_TXN_ID bigint; DROP TABLE MIN_HISTORY_LEVEL; +-- HIVE-23236 +CREATE TABLE TMP_NEXT_LOCK_ID( + NL_NEXT bigint PRIMARY KEY AUTO_INCREMENT +); +INSERT INTO TMP_NEXT_LOCK_ID (NL_NEXT) SELECT NL_NEXT FROM NEXT_LOCK_ID; +DROP TABLE NEXT_LOCK_ID; +RENAME TABLE TMP_NEXT_LOCK_ID TO NEXT_LOCK_ID; + -- These lines need to be last. Insert any changes above. 
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE; diff --git standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql index db398e5f66..9ffe9f489d 100644 --- standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql +++ standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql @@ -1035,9 +1035,8 @@ CREATE TABLE HIVE_LOCKS ( CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID); CREATE TABLE NEXT_LOCK_ID ( - NL_NEXT NUMBER(19) NOT NULL + NL_NEXT NUMBER(19) GENERATED BY DEFAULT ON NULL AS IDENTITY PRIMARY KEY ); -INSERT INTO NEXT_LOCK_ID VALUES(1); CREATE TABLE COMPACTION_QUEUE ( CQ_ID NUMBER(19) PRIMARY KEY, diff --git standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql index 1be83fc4a9..328290f39a 100644 --- standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql +++ standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql @@ -72,6 +72,14 @@ ALTER TABLE DBS ADD DB_MANAGED_LOCATION_URI VARCHAR2(4000) NULL; ALTER TABLE COMPACTION_QUEUE ADD CQ_NEXT_TXN_ID NUMBER(19); DROP TABLE MIN_HISTORY_LEVEL; +-- HIVE-23236 +DECLARE start_lock_id NUMBER; +BEGIN + SELECT MAX(NL_NEXT) + 1 INTO start_lock_id FROM NEXT_LOCK_ID; + EXECUTE IMMEDIATE 'CREATE SEQUENCE NEXT_LOCK_ID_SEQ INCREMENT BY 1 START WITH ' || start_lock_id || ' CACHE 20'; +END; +ALTER TABLE NEXT_LOCK_ID MODIFY NL_NEXT default NEXT_LOCK_ID_SEQ.nextval; + -- These lines need to be last. Insert any changes above. 
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS Status from dual; diff --git standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql index e6e30160af..00d6b2a9f3 100644 --- standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql +++ standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql @@ -1721,9 +1721,8 @@ CREATE TABLE "HIVE_LOCKS" ( CREATE INDEX HL_TXNID_INDEX ON "HIVE_LOCKS" USING hash ("HL_TXNID"); CREATE TABLE "NEXT_LOCK_ID" ( - "NL_NEXT" bigint NOT NULL + "NL_NEXT" bigserial NOT NULL ); -INSERT INTO "NEXT_LOCK_ID" VALUES(1); CREATE TABLE "COMPACTION_QUEUE" ( "CQ_ID" bigint PRIMARY KEY, diff --git standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql index b90cecb173..28c44907fe 100644 --- standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql +++ standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql @@ -203,6 +203,17 @@ ALTER TABLE "DBS" ADD "DB_MANAGED_LOCATION_URI" character varying(4000); ALTER TABLE "COMPACTION_QUEUE" ADD "CQ_NEXT_TXN_ID" bigint; DROP TABLE "MIN_HISTORY_LEVEL"; +-- HIVE-23236 +CREATE TABLE "TMP_NEXT_LOCK_ID" ( + "NL_NEXT" bigserial NOT NULL +); +INSERT INTO "TMP_NEXT_LOCK_ID" ("NL_NEXT") SELECT "NL_NEXT" FROM "NEXT_LOCK_ID"; +DROP TABLE "NEXT_LOCK_ID"; +ALTER TABLE "TMP_NEXT_LOCK_ID" RENAME TO "NEXT_LOCK_ID"; +CREATE SEQUENCE "LOCK_ID_SEQ" MINVALUE 0 OWNED BY "NEXT_LOCK_ID"."NL_NEXT"; +SELECT setval('"LOCK_ID_SEQ"', (SELECT MAX("NL_NEXT") FROM "NEXT_LOCK_ID")); +ALTER TABLE "NEXT_LOCK_ID" ALTER "NL_NEXT" 
SET DEFAULT nextval('"LOCK_ID_SEQ"'); + -- These lines need to be last. Insert any changes above. UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0';