diff --git ql/src/test/results/clientpositive/stats_nonpart.q.out ql/src/test/results/clientpositive/stats_nonpart.q.out
index cded846ade..7bf9943f2b 100644
--- ql/src/test/results/clientpositive/stats_nonpart.q.out
+++ ql/src/test/results/clientpositive/stats_nonpart.q.out
@@ -47,45 +47,12 @@ PREHOOK: type: QUERY
POSTHOOK: query: explain select count(*) from stats_nonpartitioned
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
+ Stage-0 is a root stage
STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: stats_nonpartitioned
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
- Select Operator
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
- Group By Operator
- aggregations: count()
- mode: hash
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE
- Reduce Output Operator
- sort order:
- Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE
- value expressions: _col0 (type: bigint)
- Execution mode: vectorized
- Reduce Operator Tree:
- Group By Operator
- aggregations: count(VALUE._col0)
- mode: mergepartial
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
Stage: Stage-0
Fetch Operator
- limit: -1
+ limit: 1
Processor Tree:
ListSink
@@ -265,45 +232,12 @@ PREHOOK: type: QUERY
POSTHOOK: query: explain select count(*) from stats_nonpartitioned
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
+ Stage-0 is a root stage
STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: stats_nonpartitioned
- Statistics: Num rows: 2 Data size: 7200 Basic stats: COMPLETE Column stats: COMPLETE
- Select Operator
- Statistics: Num rows: 2 Data size: 7200 Basic stats: COMPLETE Column stats: COMPLETE
- Group By Operator
- aggregations: count()
- mode: hash
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- sort order:
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- value expressions: _col0 (type: bigint)
- Execution mode: vectorized
- Reduce Operator Tree:
- Group By Operator
- aggregations: count(VALUE._col0)
- mode: mergepartial
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
Stage: Stage-0
Fetch Operator
- limit: -1
+ limit: 1
Processor Tree:
ListSink
@@ -321,47 +255,12 @@ PREHOOK: type: QUERY
POSTHOOK: query: explain select count(key) from stats_nonpartitioned
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
+ Stage-0 is a root stage
STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: stats_nonpartitioned
- Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- Select Operator
- expressions: key (type: int)
- outputColumnNames: key
- Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- Group By Operator
- aggregations: count(key)
- mode: hash
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- sort order:
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- value expressions: _col0 (type: bigint)
- Execution mode: vectorized
- Reduce Operator Tree:
- Group By Operator
- aggregations: count(VALUE._col0)
- mode: mergepartial
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
Stage: Stage-0
Fetch Operator
- limit: -1
+ limit: 1
Processor Tree:
ListSink
@@ -389,45 +288,12 @@ PREHOOK: type: QUERY
POSTHOOK: query: explain select count(*) from stats_nonpartitioned
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
+ Stage-0 is a root stage
STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: stats_nonpartitioned
- Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
- Select Operator
- Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
- Group By Operator
- aggregations: count()
- mode: hash
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- sort order:
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- value expressions: _col0 (type: bigint)
- Execution mode: vectorized
- Reduce Operator Tree:
- Group By Operator
- aggregations: count(VALUE._col0)
- mode: mergepartial
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
Stage: Stage-0
Fetch Operator
- limit: -1
+ limit: 1
Processor Tree:
ListSink
@@ -445,47 +311,12 @@ PREHOOK: type: QUERY
POSTHOOK: query: explain select count(key) from stats_nonpartitioned
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
+ Stage-0 is a root stage
STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: stats_nonpartitioned
- Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- Select Operator
- expressions: key (type: int)
- outputColumnNames: key
- Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- Group By Operator
- aggregations: count(key)
- mode: hash
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- sort order:
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- value expressions: _col0 (type: bigint)
- Execution mode: vectorized
- Reduce Operator Tree:
- Group By Operator
- aggregations: count(VALUE._col0)
- mode: mergepartial
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
Stage: Stage-0
Fetch Operator
- limit: -1
+ limit: 1
Processor Tree:
ListSink
diff --git standalone-metastore/pom.xml standalone-metastore/pom.xml
index 1e77285e25..9bdece7dcc 100644
--- standalone-metastore/pom.xml
+++ standalone-metastore/pom.xml
@@ -30,7 +30,7 @@
Hive Standalone Metastore
- 3.1.0
+ 4.0.0
UTF-8
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 28426b2647..85d9b1de3e 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -4094,7 +4094,7 @@ public void alterTable(String catName, String dbname, String name, Table newTabl
if (newTable.getValidWriteIdList() != null &&
TxnUtils.isTransactionalTable(newTable)) {
// Check concurrent INSERT case and set false to the flag.
- if (isCurrentStatsValidForTheQuery(oldt, newt.getTxnId(), newt.getWriteIdList(),
+ if (!isCurrentStatsValidForTheQuery(oldt, newt.getTxnId(), newt.getWriteIdList(),
-1, true)) {
StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE);
LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " +
@@ -12235,6 +12235,11 @@ private boolean isCurrentStatsValidForTheQuery(
long queryTxnId, String queryValidWriteIdList,
long statsWriteId, boolean checkConcurrentWrites)
throws MetaException {
+    // If statsWriteIdList is null, the stats are not transactional,
+    // so return true.
+ if (statsWriteIdList == null) {
+ return true;
+ }
// If the current query is a stats updater, then we can return true
// to avoid implementing a logic inside TxnIdUtils.checkEquivalentWriteIds().
if (statsTxnId == queryTxnId) {
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
index ff68eba4df..50d9c5b0cf 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
@@ -55,7 +55,6 @@
private Long numTrues;
private Long numFalses;
private long lastAnalyzed;
- private long txnId;
public MPartitionColumnStatistics() {}
@@ -279,12 +278,4 @@ public void setDecimalHighValue(String decimalHighValue) {
public void setBitVector(byte[] bitVector) {
this.bitVector = bitVector;
}
-
- public long getTxnId() {
- return txnId;
- }
-
- public void setTxnId(long txnId) {
- this.txnId = txnId;
- }
}
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
index 9d687e4efb..731cd6f7fa 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
@@ -53,7 +53,6 @@
private Long numTrues;
private Long numFalses;
private long lastAnalyzed;
- private long txnId;
public MTableColumnStatistics() {}
@@ -270,12 +269,4 @@ public void setDecimalHighValue(String decimalHighValue) {
public void setBitVector(byte[] bitVector) {
this.bitVector = bitVector;
}
-
- public long getTxnId() {
- return txnId;
- }
-
- public void setTxnId(long txnId) {
- this.txnId = txnId;
- }
}
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index f2642cf758..7d8f1647d6 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@ -594,64 +594,47 @@ public void cleanEmptyAbortedTxns() throws MetaException {
StringBuilder suffix = new StringBuilder();
// Turn off COLUMN_STATS_ACCURATE for txnids' components in TBLS and PARTITIONS
- for (Long txnId : txnids) {
- // Get table ids for the current txnId.
- s = "select tbl_id from TBLS where txn_id = " + txnId;
- LOG.debug("Going to execute query <" + s + ">");
- rs = stmt.executeQuery(s);
- List tblIds = new ArrayList<>();
- while (rs.next()) {
- tblIds.add(rs.getLong(1));
- }
- close(rs);
- if(tblIds.size() <= 0) {
- continue;
- }
-
- // Update COLUMN_STATS_AcCURATE.BASIC_STATS to false for each tableId.
- prefix.append("delete from TABLE_PARAMS " +
- " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and ");
- suffix.append("");
- TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, tblIds, "tbl_id", true, false);
-
- for (String query : queries) {
- LOG.debug("Going to execute update <" + query + ">");
- int rc = stmt.executeUpdate(query);
- LOG.info("Turned off " + rc + " COLUMN_STATE_ACCURATE.BASIC_STATS states from TBLS");
- }
-
- queries.clear();
- prefix.setLength(0);
- suffix.setLength(0);
+ prefix.append("select tbl_id from TBLS where ");
+ suffix.append("");
+ TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "txn_id", true, false);
- // Get partition ids for the current txnId.
- s = "select part_id from PARTITIONS where txn_id = " + txnId;
- LOG.debug("Going to execute query <" + s + ">");
- rs = stmt.executeQuery(s);
- List ptnIds = new ArrayList<>();
- while (rs.next()) ptnIds.add(rs.getLong(1));
- close(rs);
- if(ptnIds.size() <= 0) {
- continue;
- }
+ // Delete COLUMN_STATS_ACCURATE.BASIC_STATS rows from TABLE_PARAMS for the txnids.
+ List finalCommands = new ArrayList<>(queries.size());
+ for (int i = 0; i < queries.size(); i++) {
+ String query = queries.get(i);
+ finalCommands.add(i, new StringBuilder("delete from TABLE_PARAMS " +
+ " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and tbl_id in ("));
+ finalCommands.get(i).append(query + ")");
+ LOG.debug("Going to execute update <" + finalCommands.get(i) + ">");
+ int rc = stmt.executeUpdate(finalCommands.get(i).toString());
+        LOG.info("Turned off " + rc + " COLUMN_STATS_ACCURATE.BASIC_STATS states from TBLS");
+ }
- // Update COLUMN_STATS_AcCURATE.BASIC_STATS to false for each ptnId.
- prefix.append("delete from PARTITION_PARAMS " +
- " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and ");
- suffix.append("");
- TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, ptnIds, "part_id", true, false);
+ queries.clear();
+ prefix.setLength(0);
+ suffix.setLength(0);
+ finalCommands.clear();
- for (String query : queries) {
- LOG.debug("Going to execute update <" + query + ">");
- int rc = stmt.executeUpdate(query);
- LOG.info("Turned off " + rc + " COLUMN_STATE_ACCURATE.BASIC_STATS states from PARTITIONS");
- }
+      // Delete COLUMN_STATS_ACCURATE.BASIC_STATS rows from PARTITION_PARAMS for the txnids.
+ prefix.append("select part_id from PARTITIONS where ");
+ suffix.append("");
+ TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "txn_id", true, false);
- queries.clear();
- prefix.setLength(0);
- suffix.setLength(0);
+ for (int i = 0; i < queries.size(); i++) {
+ String query = queries.get(i);
+ finalCommands.add(i, new StringBuilder("delete from PARTITION_PARAMS " +
+ " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and part_id in ("));
+ finalCommands.get(i).append(query + ")");
+ LOG.debug("Going to execute update <" + finalCommands.get(i) + ">");
+ int rc = stmt.executeUpdate(finalCommands.get(i).toString());
+        LOG.info("Turned off " + rc + " COLUMN_STATS_ACCURATE.BASIC_STATS states from PARTITIONS");
}
+ queries.clear();
+ prefix.setLength(0);
+ suffix.setLength(0);
+ finalCommands.clear();
+
// Delete from TXNS.
prefix.append("delete from TXNS where ");
suffix.append("");
diff --git standalone-metastore/src/main/resources/package.jdo standalone-metastore/src/main/resources/package.jdo
index 3997f5305f..92b803f350 100644
--- standalone-metastore/src/main/resources/package.jdo
+++ standalone-metastore/src/main/resources/package.jdo
@@ -1001,9 +1001,6 @@
-
-
-
@@ -1074,9 +1071,6 @@
-
-
-
diff --git standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
index 280fd4acda..e818e1b7e2 100644
--- standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
+++ standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
@@ -47,7 +47,7 @@ CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT
CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
-CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB);
+CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER);
@@ -75,7 +75,7 @@ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "
CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
-CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N', "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB);
+CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N');
CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
@@ -106,8 +106,7 @@ CREATE TABLE "APP"."TAB_COL_STATS"(
"LAST_ANALYZED" BIGINT,
"CS_ID" BIGINT NOT NULL,
"TBL_ID" BIGINT NOT NULL,
- "BIT_VECTOR" BLOB,
- "TXN_ID" BIGINT DEFAULT 0
+ "BIT_VECTOR" BLOB
);
CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
@@ -156,8 +155,7 @@ CREATE TABLE "APP"."PART_COL_STATS"(
"NUM_FALSES" BIGINT,
"LAST_ANALYZED" BIGINT,
"CS_ID" BIGINT NOT NULL,
- "PART_ID" BIGINT NOT NULL,
- "TXN_ID" BIGINT DEFAULT 0
+ "PART_ID" BIGINT NOT NULL
);
CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
@@ -375,6 +373,7 @@ ALTER TABLE "APP"."MV_CREATION_METADATA" ADD CONSTRAINT "MV_CREATION_METADATA_PK
ALTER TABLE "APP"."CTLGS" ADD CONSTRAINT "CTLG_PK" PRIMARY KEY ("CTLG_ID");
+
-- foreign
ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
diff --git standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
index f92f13cbf0..062e374ac0 100644
--- standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
+++ standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@ -106,8 +106,7 @@ CREATE TABLE "APP"."TAB_COL_STATS"(
"LAST_ANALYZED" BIGINT,
"CS_ID" BIGINT NOT NULL,
"TBL_ID" BIGINT NOT NULL,
- "BIT_VECTOR" BLOB,
- "TXN_ID" BIGINT DEFAULT 0
+ "BIT_VECTOR" BLOB
);
CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
@@ -156,8 +155,7 @@ CREATE TABLE "APP"."PART_COL_STATS"(
"NUM_FALSES" BIGINT,
"LAST_ANALYZED" BIGINT,
"CS_ID" BIGINT NOT NULL,
- "PART_ID" BIGINT NOT NULL,
- "TXN_ID" BIGINT DEFAULT 0
+ "PART_ID" BIGINT NOT NULL
);
CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
diff --git standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
index 94f8192056..38eecd970a 100644
--- standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
+++ standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
@@ -4,8 +4,6 @@ ALTER TABLE "APP"."TBLS" ADD WRITEID_LIST CLOB;
ALTER TABLE "APP"."TBLS" ADD TXN_ID bigint DEFAULT 0;
ALTER TABLE "APP"."PARTITIONS" ADD WRITEID_LIST CLOB;
ALTER TABLE "APP"."PARTITIONS" ADD TXN_ID bigint DEFAULT 0;
-ALTER TABLE "APP"."TAB_COL_STATS" ADD TXN_ID bigint DEFAULT 0;
-ALTER TABLE "APP"."PART_COL_STATS" ADD TXN_ID bigint DEFAULT 0;
-- This needs to be the last thing done. Insert any changes above this line.
UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
diff --git standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
index f20f910dc3..c88fb18973 100644
--- standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
+++ standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
@@ -94,8 +94,7 @@ CREATE TABLE PART_COL_STATS
PART_ID bigint NULL,
PARTITION_NAME nvarchar(767) NOT NULL,
"TABLE_NAME" nvarchar(256) NOT NULL,
- "CAT_NAME" nvarchar(256) NOT NULL,
- TXN_ID bigint NULL
+ "CAT_NAME" nvarchar(256) NOT NULL
);
ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -145,9 +144,7 @@ CREATE TABLE PARTITIONS
LAST_ACCESS_TIME int NOT NULL,
PART_NAME nvarchar(767) NULL,
SD_ID bigint NULL,
- TBL_ID bigint NULL,
- TXN_ID bigint NULL,
- WRITEID_LIST text NULL
+ TBL_ID bigint NULL
);
ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
@@ -241,8 +238,7 @@ CREATE TABLE TAB_COL_STATS
NUM_TRUES bigint NULL,
TBL_ID bigint NULL,
"TABLE_NAME" nvarchar(256) NOT NULL,
- "CAT_NAME" nvarchar(256) NOT NULL,
- TXN_ID bigint NULL
+ "CAT_NAME" nvarchar(256) NOT NULL
);
ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -373,9 +369,7 @@ CREATE TABLE TBLS
TBL_TYPE nvarchar(128) NULL,
VIEW_EXPANDED_TEXT text NULL,
VIEW_ORIGINAL_TEXT text NULL,
- IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0,
- TXN_ID bigint NULL,
- WRITEID_LIST text NULL
+ IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0
);
ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
diff --git standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
index 22637c5721..1f31341113 100644
--- standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
+++ standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
@@ -94,8 +94,7 @@ CREATE TABLE PART_COL_STATS
PART_ID bigint NULL,
PARTITION_NAME nvarchar(767) NOT NULL,
"TABLE_NAME" nvarchar(256) NOT NULL,
- "CAT_NAME" nvarchar(256) NOT NULL,
- TXN_ID bigint NULL
+ "CAT_NAME" nvarchar(256) NOT NULL
);
ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -245,8 +244,7 @@ CREATE TABLE TAB_COL_STATS
NUM_TRUES bigint NULL,
TBL_ID bigint NULL,
"TABLE_NAME" nvarchar(256) NOT NULL,
- "CAT_NAME" nvarchar(256) NOT NULL,
- TXN_ID bigint NULL
+ "CAT_NAME" nvarchar(256) NOT NULL
);
ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
diff --git standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
index f0cbf6cdf0..594d165ef2 100644
--- standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
+++ standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
@@ -5,8 +5,6 @@ ALTER TABLE TBLS ADD WRITEID_LIST text NULL;
ALTER TABLE TBLS ADD TXN_ID bigint NULL;
ALTER TABLE PARTITIONS ADD WRITEID_LIST text NULL;
ALTER TABLE PARTITIONS ADD TXN_ID bigint NULL;
-ALTER TABLE TAB_COL_STATS ADD TXN_ID bigint NULL;
-ALTER TABLE PART_COL_STATS ADD TXN_ID bigint NULL;
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
diff --git standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
index 6e34ab5a7f..c54df55ed9 100644
--- standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
+++ standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
@@ -222,8 +222,6 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` (
`PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
`SD_ID` bigint(20) DEFAULT NULL,
`TBL_ID` bigint(20) DEFAULT NULL,
- `TXN_ID` bigint(20) DEFAULT 0,
- `WRITEID_LIST` text DEFAULT NULL,
PRIMARY KEY (`PART_ID`),
UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
KEY `PARTITIONS_N49` (`TBL_ID`),
@@ -627,8 +625,6 @@ CREATE TABLE IF NOT EXISTS `TBLS` (
`VIEW_EXPANDED_TEXT` mediumtext,
`VIEW_ORIGINAL_TEXT` mediumtext,
`IS_REWRITE_ENABLED` bit(1) NOT NULL DEFAULT 0,
- `TXN_ID` bigint(20) DEFAULT 0,
- `WRITEID_LIST` text DEFAULT NULL,
PRIMARY KEY (`TBL_ID`),
UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
KEY `TBLS_N50` (`SD_ID`),
@@ -724,7 +720,6 @@ CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
`NUM_TRUES` bigint(20),
`NUM_FALSES` bigint(20),
`LAST_ANALYZED` bigint(20) NOT NULL,
- `TXN_ID` bigint(20) DEFAULT 0,
PRIMARY KEY (`CS_ID`),
CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
@@ -755,7 +750,6 @@ CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
`NUM_TRUES` bigint(20),
`NUM_FALSES` bigint(20),
`LAST_ANALYZED` bigint(20) NOT NULL,
- `TXN_ID` bigint(20) DEFAULT 0,
PRIMARY KEY (`CS_ID`),
CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
diff --git standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
index f8f229d8a5..90f45ac224 100644
--- standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
+++ standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
@@ -730,7 +730,6 @@ CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
`NUM_TRUES` bigint(20),
`NUM_FALSES` bigint(20),
`LAST_ANALYZED` bigint(20) NOT NULL,
- `TXN_ID` bigint(20) DEFAULT 0,
PRIMARY KEY (`CS_ID`),
CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
@@ -762,7 +761,6 @@ CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
`NUM_TRUES` bigint(20),
`NUM_FALSES` bigint(20),
`LAST_ANALYZED` bigint(20) NOT NULL,
- `TXN_ID` bigint(20) DEFAULT 0,
PRIMARY KEY (`CS_ID`),
CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
diff --git standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
index 4ca584c598..dc011c245d 100644
--- standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
+++ standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
@@ -5,8 +5,6 @@ ALTER TABLE TBLS ADD TXN_ID bigint;
ALTER TABLE TBLS ADD WRITEID_LIST CLOB;
ALTER TABLE PARTITIONS ADD TXN_ID bigint;
ALTER TABLE PARTITIONS ADD WRITEID_LIST CLOB;
-ALTER TABLE TAB_COL_STATS ADD TXN_ID bigint;
-ALTER TABLE PART_COL_STATS ADD TXN_ID bigint;
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
diff --git standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
index abdb9844cb..63cc1f75e2 100644
--- standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
+++ standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
@@ -162,9 +162,7 @@ CREATE TABLE PARTITIONS
LAST_ACCESS_TIME NUMBER (10) NOT NULL,
PART_NAME VARCHAR2(767) NULL,
SD_ID NUMBER NULL,
- TBL_ID NUMBER NULL,
- TXN_ID NUMBER NULL,
- WRITEID_LIST CLOB NULL
+ TBL_ID NUMBER NULL
);
ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
@@ -394,9 +392,7 @@ CREATE TABLE TBLS
TBL_TYPE VARCHAR2(128) NULL,
VIEW_EXPANDED_TEXT CLOB NULL,
VIEW_ORIGINAL_TEXT CLOB NULL,
- IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)),
- TXN_ID NUMBER NULL,
- WRITEID_LIST CLOB NULL
+ IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0))
);
ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
@@ -529,8 +525,7 @@ CREATE TABLE TAB_COL_STATS (
MAX_COL_LEN NUMBER,
NUM_TRUES NUMBER,
NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL,
- TXN_ID NUMBER NULL
+ LAST_ANALYZED NUMBER NOT NULL
);
CREATE TABLE VERSION (
@@ -568,8 +563,7 @@ CREATE TABLE PART_COL_STATS (
MAX_COL_LEN NUMBER,
NUM_TRUES NUMBER,
NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL,
- TXN_ID NUMBER NULL
+ LAST_ANALYZED NUMBER NOT NULL
);
ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
@@ -1140,6 +1134,7 @@ CREATE TABLE RUNTIME_STATS (
CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+
-- -----------------------------------------------------------------
-- Record schema version. Should be the last step in the init script
-- -----------------------------------------------------------------
diff --git standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
index a143fd2e7f..cc08dc1db9 100644
--- standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
+++ standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
@@ -535,8 +535,7 @@ CREATE TABLE TAB_COL_STATS (
MAX_COL_LEN NUMBER,
NUM_TRUES NUMBER,
NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL,
- TXN_ID NUMBER NULL
+ LAST_ANALYZED NUMBER NOT NULL
);
ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
@@ -576,8 +575,7 @@ CREATE TABLE PART_COL_STATS (
MAX_COL_LEN NUMBER,
NUM_TRUES NUMBER,
NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL,
- TXN_ID NUMBER NULL
+ LAST_ANALYZED NUMBER NOT NULL
);
ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
diff --git standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
index 7ac4d40803..9e1e6cb539 100644
--- standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
+++ standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
@@ -4,8 +4,6 @@ ALTER TABLE TBLS ADD TXN_ID number NULL;
ALTER TABLE TBLS ADD WRITEID_LIST CLOB NULL;
ALTER TABLE PARTITIONS ADD TXN_ID number NULL;
ALTER TABLE PARTITIONS ADD WRITEID_LIST CLOB NULL;
-ALTER TABLE TAB_COL_STATS ADD TXN_ID number NULL;
-ALTER TABLE PART_COL_STATS ADD TXN_ID number NULL;
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
diff --git standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
index 449f295456..97697f8bf8 100644
--- standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
+++ standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
@@ -166,9 +166,7 @@ CREATE TABLE "PARTITIONS" (
"LAST_ACCESS_TIME" bigint NOT NULL,
"PART_NAME" character varying(767) DEFAULT NULL::character varying,
"SD_ID" bigint,
- "TBL_ID" bigint,
- "TXN_ID" bigint,
- "WRITEID_LIST" text
+ "TBL_ID" bigint
);
@@ -390,9 +388,7 @@ CREATE TABLE "TBLS" (
"TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
"VIEW_EXPANDED_TEXT" text,
"VIEW_ORIGINAL_TEXT" text,
- "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false,
- "TXN_ID" bigint,
- "WRITEID_LIST" text
+ "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false
);
--
@@ -543,8 +539,7 @@ CREATE TABLE "TAB_COL_STATS" (
"MAX_COL_LEN" bigint,
"NUM_TRUES" bigint,
"NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL,
- "TXN_ID" bigint
+ "LAST_ANALYZED" bigint NOT NULL
);
--
@@ -582,8 +577,7 @@ CREATE TABLE "PART_COL_STATS" (
"MAX_COL_LEN" bigint,
"NUM_TRUES" bigint,
"NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL,
- "TXN_ID" bigint
+ "LAST_ANALYZED" bigint NOT NULL
);
--
@@ -1080,8 +1074,6 @@ ALTER TABLE ONLY "WM_MAPPING"
ALTER TABLE ONLY "WM_MAPPING"
ADD CONSTRAINT "UNIQUE_WM_MAPPING" UNIQUE ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME");
--- Transactional table stats PK constraints
-
--
-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
--
@@ -1626,8 +1618,6 @@ ALTER TABLE ONLY "MV_TABLES_USED"
ALTER TABLE ONLY "MV_TABLES_USED"
ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS" ("TBL_ID") DEFERRABLE;
--- Transactional table stats FK constraints
-
--
-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
--
@@ -1832,6 +1822,7 @@ CREATE TABLE RUNTIME_STATS (
CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+
-- -----------------------------------------------------------------
-- Record schema version. Should be the last step in the init script
-- -----------------------------------------------------------------
diff --git standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
index 0ead590151..c7add637e1 100644
--- standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
+++ standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@ -549,8 +549,7 @@ CREATE TABLE "TAB_COL_STATS" (
"MAX_COL_LEN" bigint,
"NUM_TRUES" bigint,
"NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL,
- "TXN_ID" bigint
+ "LAST_ANALYZED" bigint NOT NULL
);
--
@@ -588,8 +587,7 @@ CREATE TABLE "PART_COL_STATS" (
"MAX_COL_LEN" bigint,
"NUM_TRUES" bigint,
"NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL,
- "TXN_ID" bigint
+ "LAST_ANALYZED" bigint NOT NULL
);
--
diff --git standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
index f2bae0266b..0692db1976 100644
--- standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
+++ standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
@@ -5,8 +5,6 @@ ALTER TABLE "TBLS" ADD "TXN_ID" bigint;
ALTER TABLE "TBLS" ADD "WRITEID_LIST" text;
ALTER TABLE "PARTITIONS" ADD "TXN_ID" bigint;
ALTER TABLE "PARTITIONS" ADD "WRITEID_LIST" text;
-ALTER TABLE "TAB_COL_STATS" ADD "TXN_ID" bigint;
-ALTER TABLE "PART_COL_STATS" ADD "TXN_ID" bigint;
-- These lines need to be last. Insert any changes above.
UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1;
diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 6985736ce2..7683c00b8e 100644
--- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -19,7 +19,63 @@
package org.apache.hadoop.hive.metastore;
import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.metastore.api.*;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.Catalog;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+import org.apache.hadoop.hive.metastore.api.ISchema;
+import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
+import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.WMNullablePool;
+import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMTrigger;
+import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.RuntimeStat;
+import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
+import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.metastore.api.WMPool;
+import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+import org.apache.hadoop.hive.metastore.api.ISchemaName;
+import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
import java.nio.ByteBuffer;
import java.util.ArrayList;
diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 37e9920baa..97f9464339 100644
--- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -19,7 +19,11 @@
package org.apache.hadoop.hive.metastore;
import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.metastore.api.*;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+import org.apache.hadoop.hive.metastore.api.ISchemaName;
+import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
+import org.apache.hadoop.hive.metastore.api.Catalog;
+import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import java.nio.ByteBuffer;
import java.util.Collections;
@@ -27,6 +31,58 @@
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+import org.apache.hadoop.hive.metastore.api.ISchema;
+import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
+import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.WMNullablePool;
+import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMTrigger;
+import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.RuntimeStat;
+import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
+import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;