diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index 54b616e60c..5fa3d9ad42 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -92,7 +92,7 @@ public void run() { try { handle = txnHandler.getMutexAPI().acquireLock(TxnStore.MUTEX_KEY.Cleaner.name()); startedAt = System.currentTimeMillis(); - long minOpenTxnId = txnHandler.findMinOpenTxnId(); + long minOpenTxnId = txnHandler.findMinOpenTxnIdForCleaner(); for(CompactionInfo compactionInfo : txnHandler.findReadyToClean()) { clean(compactionInfo, minOpenTxnId); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java index bf91ae704c..70d63ab18b 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java @@ -225,6 +225,7 @@ public static CompactionInfoStruct compactionInfoToStruct(CompactionInfo ci) { cr.setWorkerId(ci.workerId); cr.setHighestWriteId(ci.highestWriteId); cr.setErrorMessage(ci.errorMessage); + return cr; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java index 19a95b64db..66eb58d69d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java @@ -221,8 +221,10 @@ public void 
markCompacted(CompactionInfo info) throws MetaException { try { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); stmt = dbConn.createStatement(); - String s = "UPDATE \"COMPACTION_QUEUE\" SET \"CQ_STATE\" = '" + READY_FOR_CLEANING + "', " + - "\"CQ_WORKER_ID\" = NULL WHERE \"CQ_ID\" = " + info.id; + String s = "UPDATE \"COMPACTION_QUEUE\" SET \"CQ_STATE\" = '" + READY_FOR_CLEANING + "', " + + "\"CQ_WORKER_ID\" = NULL, \"CQ_NEXT_TXN_ID\" = " + + "(SELECT \"NTXN_NEXT\" FROM \"NEXT_TXN_ID\")" + + " WHERE \"CQ_ID\" = " + info.id; LOG.debug("Going to execute update <" + s + ">"); int updCnt = stmt.executeUpdate(s); if (updCnt != 1) { @@ -302,57 +304,6 @@ public void markCompacted(CompactionInfo info) throws MetaException { return findReadyToClean(); } } - @Override - public long findMinOpenTxnId() throws MetaException { - Connection dbConn = null; - Statement stmt = null; - ResultSet rs = null; - try { - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - stmt = dbConn.createStatement(); - return findMinOpenTxnGLB(stmt); - } catch (SQLException e) { - LOG.error("Unable to findMinOpenTxnId() due to:" + e.getMessage()); - rollbackDBConn(dbConn); - checkRetryable(dbConn, e, "findMinOpenTxnId"); - throw new MetaException("Unable to execute findMinOpenTxnId() " + - StringUtils.stringifyException(e)); - } finally { - close(rs, stmt, dbConn); - } - } catch (RetryException e) { - return findMinOpenTxnId(); - } - } - - /** - * See doc at {@link TxnStore#findMinOpenTxnId()} - * Note that {@link #openTxns(OpenTxnRequest)} makes update of NEXT_TXN and MIN_HISTORY_LEVEL - * a single atomic operation (and no one else should update these tables except the cleaner - * which deletes rows from MIN_HISTORY_LEVEL which can only allow minOpenTxn to move higher) - */ - private long findMinOpenTxnGLB(Statement stmt) throws MetaException, SQLException { - String s = "SELECT \"NTXN_NEXT\" FROM \"NEXT_TXN_ID\""; - LOG.debug("Going to execute query <" + s + ">"); - 
ResultSet rs = stmt.executeQuery(s); - if (!rs.next()) { - throw new MetaException("Transaction tables not properly " + - "initialized, no record found in next_txn_id"); - } - long hwm = rs.getLong(1); - s = "SELECT MIN(\"MHL_MIN_OPEN_TXNID\") FROM \"MIN_HISTORY_LEVEL\""; - LOG.debug("Going to execute query <" + s + ">"); - rs = stmt.executeQuery(s); - rs.next(); - long minOpenTxnId = rs.getLong(1); - if(rs.wasNull()) { - return hwm; - } - //since generating new txnid uses select for update on single row in NEXT_TXN_ID - assert hwm >= minOpenTxnId : "(hwm, minOpenTxnId)=(" + hwm + "," + minOpenTxnId + ")"; - return minOpenTxnId; - } /** * This will remove an entry from the queue after @@ -523,7 +474,7 @@ public void markCleaned(CompactionInfo info) throws MetaException { } /** * Clean up entries from TXN_TO_WRITE_ID table less than min_uncommited_txnid as found by - * min(NEXT_TXN_ID.ntxn_next, min(MIN_HISTORY_LEVEL.mhl_min_open_txnid), min(Aborted TXNS.txn_id)). + * min(NEXT_TXN_ID.ntxn_next, min(WRITE_SET.WS_COMMIT_ID), min(Aborted TXNS.txn_id)). */ @Override @RetrySemantics.SafeToRetry @@ -542,25 +493,26 @@ public void cleanTxnToWriteIdTable() throws MetaException { // First need to find the min_uncommitted_txnid which is currently seen by any open transactions. // If there are no txns which are currently open or aborted in the system, then current value of // NEXT_TXN_ID.ntxn_next could be min_uncommitted_txnid. - long minUncommittedTxnId = findMinOpenTxnGLB(stmt); - - // If there are aborted txns, then the minimum aborted txnid could be the min_uncommitted_txnid - // if lesser than both NEXT_TXN_ID.ntxn_next and min(MIN_HISTORY_LEVEL .mhl_min_open_txnid). 
- String s = "SELECT MIN(\"TXN_ID\") FROM \"TXNS\" WHERE \"TXN_STATE\" = " + quoteChar(TXN_ABORTED); + String s = "SELECT MIN(\"RES\".\"ID\") AS \"ID\" FROM (" + + "SELECT MIN(\"NTXN_NEXT\") AS \"ID\" FROM \"NEXT_TXN_ID\" " + + "UNION " + + "SELECT MIN(\"WS_COMMIT_ID\") AS \"ID\" FROM \"WRITE_SET\" " + + "UNION " + + "SELECT MIN(\"TXN_ID\") FROM \"TXNS\" WHERE \"TXN_STATE\" = " + quoteChar(TXN_ABORTED) + + ") \"RES\""; LOG.debug("Going to execute query <" + s + ">"); rs = stmt.executeQuery(s); - if (rs.next()) { - long minAbortedTxnId = rs.getLong(1); - if (minAbortedTxnId > 0) { - minUncommittedTxnId = Math.min(minAbortedTxnId, minUncommittedTxnId); - } + if (!rs.next()) { + throw new MetaException("Transaction tables not properly initialized, no record found in NEXT_TXN_ID"); } + long minUncommitedTxnid = rs.getLong(1); + // As all txns below min_uncommitted_txnid are either committed or empty_aborted, we are allowed // to cleanup the entries less than min_uncommitted_txnid from the TXN_TO_WRITE_ID table. 
- s = "DELETE FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_TXNID\" < " + minUncommittedTxnId; + s = "DELETE FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_TXNID\" < " + minUncommitedTxnid; LOG.debug("Going to execute delete <" + s + ">"); int rc = stmt.executeUpdate(s); - LOG.info("Removed " + rc + " rows from TXN_TO_WRITE_ID with Txn Low-Water-Mark: " + minUncommittedTxnId); + LOG.info("Removed " + rc + " rows from TXN_TO_WRITE_ID with Txn Low-Water-Mark: " + minUncommitedTxnid); LOG.debug("Going to commit"); dbConn.commit(); @@ -1168,6 +1120,44 @@ public void setHadoopJobId(String hadoopJobId, long id) { setHadoopJobId(hadoopJobId, id); } } + + @Override + @RetrySemantics.Idempotent + public long findMinOpenTxnIdForCleaner() throws MetaException { + Connection dbConn = null; + Statement stmt = null; + ResultSet rs = null; + try { + try { + dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); + stmt = dbConn.createStatement(); + String query = "SELECT MIN(\"RES\".\"ID\") AS \"ID\" FROM (" + + "SELECT MIN(\"TXN_ID\") AS \"ID\" FROM \"TXNS\" WHERE \"TXN_STATE\" = " + quoteChar(TXN_OPEN) + + " UNION " + + "SELECT MIN(\"CQ_NEXT_TXN_ID\") AS \"ID\" FROM \"COMPACTION_QUEUE\" WHERE \"CQ_STATE\" = " + + quoteChar(READY_FOR_CLEANING) + + " UNION " + + "SELECT \"NTXN_NEXT\" FROM \"NEXT_TXN_ID\"" + + ") \"RES\""; + LOG.debug("Going to execute query <" + query + ">"); + rs = stmt.executeQuery(query); + if (!rs.next()) { + throw new MetaException("Transaction tables not properly initialized, no record found in NEXT_TXN_ID"); + } + return rs.getLong(1); + } catch (SQLException e) { + LOG.error("Unable to findMinOpenTxnIdForCleaner", e); + rollbackDBConn(dbConn); + checkRetryable(dbConn, e, "findMinOpenTxnIdForCleaner"); + throw new MetaException("Unable to execute findMinOpenTxnIdForCleaner() " + + StringUtils.stringifyException(e)); + } finally { + close(rs, stmt, dbConn); + } + } catch (RetryException e) { + return findMinOpenTxnIdForCleaner(); + } + } } diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java index ef88240a79..970574273f 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java @@ -170,7 +170,8 @@ public static synchronized void prepDb(Configuration conf) throws Exception { " CQ_HIGHEST_WRITE_ID bigint," + " CQ_META_INFO varchar(2048) for bit data," + " CQ_HADOOP_JOB_ID varchar(32)," + - " CQ_ERROR_MESSAGE clob)"); + " CQ_ERROR_MESSAGE clob," + + " CQ_NEXT_TXN_ID bigint)"); stmt.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)"); stmt.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)"); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index 74ef88545e..8fe088139f 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -631,48 +631,6 @@ public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException { pst.execute(); } - // Need to register minimum open txnid for current transactions into MIN_HISTORY table. - // For a single txn we can do it in a single insert. 
With multiple txns calculating the - // minOpenTxnId for every insert is not cost effective, so caching the value - if (txnIds.size() == 1) { - s = "INSERT INTO \"MIN_HISTORY_LEVEL\" (\"MHL_TXNID\",\"MHL_MIN_OPEN_TXNID\") " + - "SELECT ?, MIN(\"TXN_ID\") FROM \"TXNS\" WHERE \"TXN_STATE\" = " + quoteChar(TXN_OPEN); - LOG.debug("Going to execute query <" + s + ">"); - try (PreparedStatement pstmt = dbConn.prepareStatement(s)) { - pstmt.setLong(1, txnIds.get(0)); - pstmt.execute(); - } - LOG.info("Added entries to MIN_HISTORY_LEVEL with a single query for current txn: " + txnIds); - } else { - s = "SELECT MIN(\"TXN_ID\") FROM \"TXNS\" WHERE \"TXN_STATE\" = " + quoteChar(TXN_OPEN); - LOG.debug("Going to execute query <" + s + ">"); - long minOpenTxnId = -1L; - try(ResultSet minOpenTxnIdRs = stmt.executeQuery(s)) { - if (!minOpenTxnIdRs.next()) { - throw new IllegalStateException("Scalar query returned no rows?!?!!"); - } - // TXNS table should have at least one entry because we just inserted the newly opened txns. - // So, min(txn_id) would be a non-zero txnid. - minOpenTxnId = minOpenTxnIdRs.getLong(1); - } - - assert (minOpenTxnId > 0); - rows.clear(); - for (long txnId = first; txnId < first + numTxns; txnId++) { - rows.add(txnId + ", " + minOpenTxnId); - } - - // Insert transaction entries into MIN_HISTORY_LEVEL. 
- List inserts = sqlGenerator.createInsertValuesStmt( - "\"MIN_HISTORY_LEVEL\" (\"MHL_TXNID\", \"MHL_MIN_OPEN_TXNID\")", rows); - for (String insert : inserts) { - LOG.debug("Going to execute insert <" + insert + ">"); - stmt.execute(insert); - } - LOG.info("Added entries to MIN_HISTORY_LEVEL for current txns: (" + txnIds - + ") with min_open_txn: " + minOpenTxnId); - } - if (rqst.isSetReplPolicy()) { List rowsRepl = new ArrayList<>(); for (PreparedStatement pst : insertPreparedStmts) { @@ -1324,10 +1282,6 @@ public void commitTxn(CommitTxnRequest rqst) s = "DELETE FROM \"TXNS\" WHERE \"TXN_ID\" = " + txnid; LOG.debug("Going to execute update <" + s + ">"); stmt.executeUpdate(s); - s = "DELETE FROM \"MIN_HISTORY_LEVEL\" WHERE \"MHL_TXNID\" = " + txnid; - LOG.debug("Going to execute update <" + s + ">"); - stmt.executeUpdate(s); - LOG.info("Removed committed transaction: (" + txnid + ") from MIN_HISTORY_LEVEL"); s = "DELETE FROM \"MATERIALIZATION_REBUILD_LOCKS\" WHERE \"MRL_TXN_ID\" = " + txnid; LOG.debug("Going to execute update <" + s + ">"); @@ -4247,25 +4201,6 @@ private int abortTxns(Connection dbConn, List txnids, boolean checkHeartbe updateCnt += stmt.executeUpdate(query); } - // As current txn is aborted, this won't read any data from other txns. So, it is safe to unregister - // the min_open_txnid from MIN_HISTORY_LEVEL for the aborted txns. Even if the txns in the list are - // partially aborted, it is safe to delete from MIN_HISTORY_LEVEL as the remaining txns are either - // already committed or aborted. 
- queries.clear(); - prefix.setLength(0); - suffix.setLength(0); - - prefix.append("DELETE FROM \"MIN_HISTORY_LEVEL\" WHERE "); - - TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "\"MHL_TXNID\"", false, false); - - for (String query : queries) { - LOG.debug("Going to execute update <" + query + ">"); - int rc = stmt.executeUpdate(query); - LOG.debug("Deleted " + rc + " records from MIN_HISTORY_LEVEL"); - } - LOG.info("Removed aborted transactions: (" + txnids + ") from MIN_HISTORY_LEVEL"); - if (updateCnt < txnids.size() && isStrict) { /** * have to bail in this case since we don't know which transactions were not Aborted and diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java index 41d2e7924b..87130a519d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java @@ -359,16 +359,6 @@ void onRename(String oldCatName, String oldDbName, String oldTabName, String old @RetrySemantics.ReadOnly List findReadyToClean() throws MetaException; - /** - * Returns the smallest txnid that could be seen in open state across all active transactions in - * the system or {@code NEXT_TXN_ID.NTXN_NEXT} if there are no active transactions, i.e. the - * smallest txnid that can be seen as unresolved in the whole system. Even if a transaction - * is opened concurrently with this call it cannot have an id less than what this method returns. - * @return transaction ID - */ - @RetrySemantics.ReadOnly - long findMinOpenTxnId() throws MetaException; - /** * This will remove an entry from the queue after * it has been compacted. 
@@ -517,4 +507,12 @@ void onRename(String oldCatName, String oldDbName, String oldTabName, String old */ @RetrySemantics.Idempotent void addWriteNotificationLog(AcidWriteEvent acidWriteEvent) throws MetaException; + + /** + * Return the currently seen minimum open transaction ID. + * @return minimum transaction ID + * @throws MetaException + */ + @RetrySemantics.Idempotent + long findMinOpenTxnIdForCleaner() throws MetaException; } diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql index 48ad676231..e6602a437a 100644 --- a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql +++ b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql @@ -602,7 +602,8 @@ CREATE TABLE COMPACTION_QUEUE ( CQ_HIGHEST_WRITE_ID bigint, CQ_META_INFO varchar(2048) for bit data, CQ_HADOOP_JOB_ID varchar(32), - CQ_ERROR_MESSAGE clob + CQ_ERROR_MESSAGE clob, + CQ_NEXT_TXN_ID bigint ); CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( @@ -666,14 +667,6 @@ CREATE TABLE NEXT_WRITE_ID ( CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); -CREATE TABLE MIN_HISTORY_LEVEL ( - MHL_TXNID bigint NOT NULL, - MHL_MIN_OPEN_TXNID bigint NOT NULL, - PRIMARY KEY(MHL_TXNID) -); - -CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID); - CREATE TABLE MATERIALIZATION_REBUILD_LOCKS ( MRL_TXN_ID BIGINT NOT NULL, MRL_DB_NAME VARCHAR(128) NOT NULL, diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql index 7a230bde3a..f1ca9327d8 100644 --- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql +++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql @@ -61,5 +61,9 @@ 
CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_ -- HIVE-22872 ALTER TABLE "SCHEDULED_QUERIES" ADD "ACTIVE_EXECUTION_ID" bigint; +-- HIVE-23107 +ALTER TABLE COMPACTION_QUEUE ADD CQ_NEXT_TXN_ID bigint; +DROP TABLE MIN_HISTORY_LEVEL; + -- This needs to be the last thing done. Insert any changes above this line. UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; \ No newline at end of file diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql index 9ed7f4f819..f02bf5b00b 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql @@ -1015,6 +1015,7 @@ CREATE TABLE COMPACTION_QUEUE( CQ_META_INFO varbinary(2048) NULL, CQ_HADOOP_JOB_ID nvarchar(128) NULL, CQ_ERROR_MESSAGE varchar(max) NULL, + CQ_NEXT_TXN_ID bigint NULL, PRIMARY KEY CLUSTERED ( CQ_ID ASC @@ -1199,17 +1200,6 @@ CREATE TABLE NEXT_WRITE_ID ( CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); -CREATE TABLE MIN_HISTORY_LEVEL ( - MHL_TXNID bigint NOT NULL, - MHL_MIN_OPEN_TXNID bigint NOT NULL, -PRIMARY KEY CLUSTERED -( - MHL_TXNID ASC -) -); - -CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID); - CREATE TABLE MATERIALIZATION_REBUILD_LOCKS ( MRL_TXN_ID bigint NOT NULL, MRL_DB_NAME nvarchar(128) NOT NULL, diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql index 12d24e9a56..1ae74a542e 100644 ---
b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql @@ -65,6 +65,10 @@ CREATE INDEX IDX_SCHEDULED_EX_SQ_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_QUERY_ -- HIVE-23033 INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 WHERE NOT EXISTS (SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE); +-- HIVE-23107 +ALTER TABLE COMPACTION_QUEUE ADD CQ_NEXT_TXN_ID bigint NULL; +DROP TABLE MIN_HISTORY_LEVEL; + -- These lines need to be last. Insert any changes above. UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE; diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql index bc34b511a9..88e0455fb6 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql @@ -1071,7 +1071,8 @@ CREATE TABLE COMPACTION_QUEUE ( CQ_HIGHEST_WRITE_ID bigint, CQ_META_INFO varbinary(2048), CQ_HADOOP_JOB_ID varchar(32), - CQ_ERROR_MESSAGE mediumtext + CQ_ERROR_MESSAGE mediumtext, + CQ_NEXT_TXN_ID bigint ) ENGINE=InnoDB DEFAULT CHARSET=latin1; CREATE TABLE COMPLETED_COMPACTIONS ( @@ -1133,14 +1134,6 @@ CREATE TABLE NEXT_WRITE_ID ( CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); -CREATE TABLE MIN_HISTORY_LEVEL ( - MHL_TXNID bigint NOT NULL, - MHL_MIN_OPEN_TXNID bigint NOT NULL, - PRIMARY KEY(MHL_TXNID) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID); - CREATE TABLE MATERIALIZATION_REBUILD_LOCKS ( MRL_TXN_ID bigint NOT NULL, MRL_DB_NAME VARCHAR(128) NOT NULL, diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql index 13f03bce6d..a5e39079aa 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql @@ -65,6 +65,10 @@ CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_ -- HIVE-22872 ALTER TABLE SCHEDULED_QUERIES ADD COLUMN ACTIVE_EXECUTION_ID INTEGER ; +-- HIVE-23107 +ALTER TABLE COMPACTION_QUEUE ADD CQ_NEXT_TXN_ID bigint; +DROP TABLE MIN_HISTORY_LEVEL; + -- These lines need to be last. Insert any changes above. UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE; diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql index 8482b5942c..4309fed5ee 100644 --- a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql @@ -1052,7 +1052,8 @@ CREATE TABLE COMPACTION_QUEUE ( CQ_HIGHEST_WRITE_ID NUMBER(19), CQ_META_INFO BLOB, CQ_HADOOP_JOB_ID varchar2(32), - CQ_ERROR_MESSAGE CLOB + CQ_ERROR_MESSAGE CLOB, + CQ_NEXT_TXN_ID NUMBER(19) ) ROWDEPENDENCIES; CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( @@ -1114,14 +1115,6 @@ CREATE TABLE NEXT_WRITE_ID ( CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); -CREATE TABLE MIN_HISTORY_LEVEL ( - MHL_TXNID NUMBER(19) NOT NULL, - MHL_MIN_OPEN_TXNID NUMBER(19) NOT NULL, - PRIMARY KEY(MHL_TXNID) -); - -CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID); - CREATE TABLE MATERIALIZATION_REBUILD_LOCKS ( MRL_TXN_ID NUMBER NOT NULL, MRL_DB_NAME VARCHAR(128) NOT NULL, diff 
--git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql index cbfdd861fd..4ee58d3168 100644 --- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql @@ -65,6 +65,10 @@ CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_ -- HIVE-22872 ALTER TABLE SCHEDULED_QUERIES ADD ACTIVE_EXECUTION_ID number(19); +-- HIVE-23107 +ALTER TABLE COMPACTION_QUEUE ADD CQ_NEXT_TXN_ID NUMBER(19); +DROP TABLE MIN_HISTORY_LEVEL; + -- These lines need to be last. Insert any changes above. UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS Status from dual; diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql index aa35a7a7b3..37f6a350cd 100644 --- a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql @@ -1738,7 +1738,8 @@ CREATE TABLE "COMPACTION_QUEUE" ( "CQ_HIGHEST_WRITE_ID" bigint, "CQ_META_INFO" bytea, "CQ_HADOOP_JOB_ID" varchar(32), - "CQ_ERROR_MESSAGE" text + "CQ_ERROR_MESSAGE" text, + "CQ_NEXT_TXN_ID" bigint ); CREATE TABLE "NEXT_COMPACTION_QUEUE_ID" ( @@ -1800,14 +1801,6 @@ CREATE TABLE "NEXT_WRITE_ID" ( CREATE UNIQUE INDEX "NEXT_WRITE_ID_IDX" ON "NEXT_WRITE_ID" ("NWI_DATABASE", "NWI_TABLE"); -CREATE TABLE "MIN_HISTORY_LEVEL" ( - "MHL_TXNID" bigint NOT NULL, - "MHL_MIN_OPEN_TXNID" bigint NOT NULL, - PRIMARY KEY("MHL_TXNID") -); - -CREATE INDEX "MIN_HISTORY_LEVEL_IDX" ON "MIN_HISTORY_LEVEL" 
("MHL_MIN_OPEN_TXNID"); - CREATE TABLE "MATERIALIZATION_REBUILD_LOCKS" ( "MRL_TXN_ID" bigint NOT NULL, "MRL_DB_NAME" varchar(128) NOT NULL, diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql index 9462328a5f..ae4480e032 100644 --- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql @@ -196,6 +196,10 @@ CREATE INDEX "COMPLETED_COMPACTIONS_RES" ON "COMPLETED_COMPACTIONS" ("CC_DATABAS -- HIVE-22872 ALTER TABLE "SCHEDULED_QUERIES" ADD "ACTIVE_EXECUTION_ID" bigint; +-- HIVE-23107 +ALTER TABLE "COMPACTION_QUEUE" ADD "CQ_NEXT_TXN_ID" bigint; +DROP TABLE "MIN_HISTORY_LEVEL"; + -- These lines need to be last. Insert any changes above. UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0';