commit d186bb6cc02f68ce2f716771536b81d1851c5d74 Author: Alan Gates Date: Thu Oct 19 16:49:38 2017 -0700 HIVE-17983 Make the standalone metastore generate tarballs etc. diff --git beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java index fc0887b909..d71905e67d 100644 --- beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java +++ beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java @@ -159,12 +159,12 @@ Connection getConnectionToMetastore(boolean printInfo) private NestedScriptParser getDbCommandParser(String dbType, String metaDbType) { return HiveSchemaHelper.getDbCommandParser(dbType, dbOpts, userName, - passWord, hiveConf, metaDbType); + passWord, hiveConf, metaDbType, false); } private NestedScriptParser getDbCommandParser(String dbType) { return HiveSchemaHelper.getDbCommandParser(dbType, dbOpts, userName, - passWord, hiveConf, null); + passWord, hiveConf, null, false); } /*** @@ -804,7 +804,7 @@ boolean validateSchemaTables(Connection conn) throws HiveMetaException { private List findCreateTable(String path, List tableList) throws Exception { - NestedScriptParser sp = HiveSchemaHelper.getDbCommandParser(dbType); + NestedScriptParser sp = HiveSchemaHelper.getDbCommandParser(dbType, false); Matcher matcher = null; Pattern regexp = null; List subs = new ArrayList(); diff --git binary-package-licenses/README binary-package-licenses/README index ef127e33b0..c801896663 100644 --- binary-package-licenses/README +++ binary-package-licenses/README @@ -42,6 +42,7 @@ guava guice* hbase* hibernate-validator +Hikaricp htrace-core http-client httpclient diff --git itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java index 25bd71a561..5591bad97b 100644 --- itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java +++ itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java @@ -338,7 +338,7 @@ public void testScripts() throws Exception { String expectedSQL = StringUtils.join(resultScript, System.getProperty("line.separator")) + System.getProperty("line.separator"); File testScriptFile = generateTestScript(testScript); - String flattenedSql = HiveSchemaHelper.getDbCommandParser("derby") + String flattenedSql = HiveSchemaHelper.getDbCommandParser("derby", false) .buildCommand(testScriptFile.getParentFile().getPath(), testScriptFile.getName()); @@ -380,7 +380,7 @@ public void testNestedScriptsForDerby() throws Exception { }; File testScriptFile = generateTestScript(parentTestScript); - String flattenedSql = HiveSchemaHelper.getDbCommandParser("derby") + String flattenedSql = HiveSchemaHelper.getDbCommandParser("derby", false) .buildCommand(testScriptFile.getParentFile().getPath(), testScriptFile.getName()); assertFalse(flattenedSql.contains("RUN")); @@ -425,7 +425,7 @@ public void testNestedScriptsForMySQL() throws Exception { }; File testScriptFile = generateTestScript(parentTestScript); - String flattenedSql = HiveSchemaHelper.getDbCommandParser("mysql") + String flattenedSql = HiveSchemaHelper.getDbCommandParser("mysql", false) .buildCommand(testScriptFile.getParentFile().getPath(), testScriptFile.getName()); assertFalse(flattenedSql.contains("RUN")); @@ -467,7 +467,7 @@ public void testScriptWithDelimiter() throws Exception { String expectedSQL = StringUtils.join(resultScript, System.getProperty("line.separator")) + System.getProperty("line.separator"); File testScriptFile = 
generateTestScript(testScript); - NestedScriptParser testDbParser = HiveSchemaHelper.getDbCommandParser("mysql"); + NestedScriptParser testDbParser = HiveSchemaHelper.getDbCommandParser("mysql", false); String flattenedSql = testDbParser.buildCommand(testScriptFile.getParentFile().getPath(), testScriptFile.getName()); @@ -502,7 +502,7 @@ public void testScriptMultiRowComment() throws Exception { String expectedSQL = StringUtils.join(parsedScript, System.getProperty("line.separator")) + System.getProperty("line.separator"); File testScriptFile = generateTestScript(testScript); - NestedScriptParser testDbParser = HiveSchemaHelper.getDbCommandParser("mysql"); + NestedScriptParser testDbParser = HiveSchemaHelper.getDbCommandParser("mysql", false); String flattenedSql = testDbParser.buildCommand(testScriptFile.getParentFile().getPath(), testScriptFile.getName()); @@ -544,7 +544,7 @@ public void testNestedScriptsForOracle() throws Exception { }; File testScriptFile = generateTestScript(parentTestScript); - String flattenedSql = HiveSchemaHelper.getDbCommandParser("oracle") + String flattenedSql = HiveSchemaHelper.getDbCommandParser("oracle", false) .buildCommand(testScriptFile.getParentFile().getPath(), testScriptFile.getName()); assertFalse(flattenedSql.contains("@")); @@ -576,7 +576,7 @@ public void testPostgresFilter() throws Exception { }; NestedScriptParser noDbOptParser = HiveSchemaHelper - .getDbCommandParser("postgres"); + .getDbCommandParser("postgres", false); String expectedSQL = StringUtils.join( expectedScriptWithOptionPresent, System.getProperty("line.separator")) + System.getProperty("line.separator"); @@ -594,7 +594,7 @@ public void testPostgresFilter() throws Exception { NestedScriptParser dbOptParser = HiveSchemaHelper.getDbCommandParser( "postgres", PostgresCommandParser.POSTGRES_SKIP_STANDARD_STRINGS_DBOPT, - null, null, null, null); + null, null, null, null, false); expectedSQL = StringUtils.join( expectedScriptWithOptionAbsent, System.getProperty("line.separator")) + System.getProperty("line.separator"); diff --git metastore/scripts/upgrade/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql metastore/scripts/upgrade/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql deleted file mode 100644 index f5a260e687..0000000000 --- metastore/scripts/upgrade/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql +++ /dev/null @@ -1,13 +0,0 @@ -SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS MESSAGE; - -:r 026-HIVE-16556.mssql.sql -:r 027-HIVE-16575.mssql.sql -:r 028-HIVE-16922.mssql.sql -:r 029-HIVE-16997.mssql.sql -:r 030-HIVE-16886.mssql.sql -:r 031-HIVE-17566.mssql.sql -:r 033-HIVE-14498.mssql.sql -:r 034-HIVE-18489.mssql.sql - -UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; -SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS MESSAGE; diff --git metastore/scripts/upgrade/mysql/hive-txn-schema-3.0.0.mysql.sql metastore/scripts/upgrade/mysql/hive-txn-schema-3.0.0.mysql.sql deleted file mode 100644 index 41da503354..0000000000 --- metastore/scripts/upgrade/mysql/hive-txn-schema-3.0.0.mysql.sql +++ /dev/null @@ -1,138 +0,0 @@ --- Licensed to the Apache Software Foundation (ASF) under one or more --- contributor license agreements. See the NOTICE file distributed with --- this work for additional information regarding copyright ownership. --- The ASF licenses this file to You under the Apache License, Version 2.0 --- (the "License"); you may not use this file except in compliance with --- the License. 
You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, software --- distributed under the License is distributed on an "AS IS" BASIS, --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --- See the License for the specific language governing permissions and --- limitations under the License. - --- --- Tables for transaction management --- - -CREATE TABLE TXNS ( - TXN_ID bigint PRIMARY KEY, - TXN_STATE char(1) NOT NULL, - TXN_STARTED bigint NOT NULL, - TXN_LAST_HEARTBEAT bigint NOT NULL, - TXN_USER varchar(128) NOT NULL, - TXN_HOST varchar(128) NOT NULL, - TXN_AGENT_INFO varchar(128), - TXN_META_INFO varchar(128), - TXN_HEARTBEAT_COUNT int -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -CREATE TABLE TXN_COMPONENTS ( - TC_TXNID bigint NOT NULL, - TC_DATABASE varchar(128) NOT NULL, - TC_TABLE varchar(128) NOT NULL, - TC_PARTITION varchar(767), - TC_OPERATION_TYPE char(1) NOT NULL, - FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); - -CREATE TABLE COMPLETED_TXN_COMPONENTS ( - CTC_TXNID bigint NOT NULL, - CTC_DATABASE varchar(128) NOT NULL, - CTC_TABLE varchar(256), - CTC_PARTITION varchar(767), - CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX2 ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE; - -CREATE TABLE NEXT_TXN_ID ( - NTXN_NEXT bigint NOT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -INSERT INTO NEXT_TXN_ID VALUES(1); - -CREATE TABLE HIVE_LOCKS ( - HL_LOCK_EXT_ID bigint NOT NULL, - HL_LOCK_INT_ID bigint NOT NULL, - HL_TXNID bigint, - HL_DB varchar(128) NOT NULL, - HL_TABLE varchar(128), - HL_PARTITION varchar(767), - HL_LOCK_STATE char(1) not null, - HL_LOCK_TYPE char(1) not null, - HL_LAST_HEARTBEAT bigint NOT NULL, - HL_ACQUIRED_AT bigint, - HL_USER varchar(128) NOT NULL, - HL_HOST varchar(128) NOT NULL, - HL_HEARTBEAT_COUNT int, - HL_AGENT_INFO varchar(128), - HL_BLOCKEDBY_EXT_ID bigint, - HL_BLOCKEDBY_INT_ID bigint, - PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID), - KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID); - -CREATE TABLE NEXT_LOCK_ID ( - NL_NEXT bigint NOT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -INSERT INTO NEXT_LOCK_ID VALUES(1); - -CREATE TABLE COMPACTION_QUEUE ( - CQ_ID bigint PRIMARY KEY, - CQ_DATABASE varchar(128) NOT NULL, - CQ_TABLE varchar(128) NOT NULL, - CQ_PARTITION varchar(767), - CQ_STATE char(1) NOT NULL, - CQ_TYPE char(1) NOT NULL, - CQ_TBLPROPERTIES varchar(2048), - CQ_WORKER_ID varchar(128), - CQ_START bigint, - CQ_RUN_AS varchar(128), - CQ_HIGHEST_TXN_ID bigint, - CQ_META_INFO varbinary(2048), - CQ_HADOOP_JOB_ID varchar(32) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -CREATE TABLE COMPLETED_COMPACTIONS ( - CC_ID bigint PRIMARY KEY, - CC_DATABASE varchar(128) NOT NULL, - CC_TABLE varchar(128) NOT NULL, - CC_PARTITION varchar(767), - CC_STATE char(1) NOT NULL, - CC_TYPE char(1) NOT NULL, - CC_TBLPROPERTIES varchar(2048), - CC_WORKER_ID varchar(128), - CC_START bigint, - CC_END bigint, - CC_RUN_AS varchar(128), - CC_HIGHEST_TXN_ID bigint, - CC_META_INFO varbinary(2048), - CC_HADOOP_JOB_ID varchar(32) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( - NCQ_NEXT bigint NOT NULL -) 
ENGINE=InnoDB DEFAULT CHARSET=latin1; -INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); - -CREATE TABLE AUX_TABLE ( - MT_KEY1 varchar(128) NOT NULL, - MT_KEY2 bigint NOT NULL, - MT_COMMENT varchar(255), - PRIMARY KEY(MT_KEY1, MT_KEY2) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -CREATE TABLE WRITE_SET ( - WS_DATABASE varchar(128) NOT NULL, - WS_TABLE varchar(128) NOT NULL, - WS_PARTITION varchar(767), - WS_TXNID bigint NOT NULL, - WS_COMMIT_ID bigint NOT NULL, - WS_OPERATION_TYPE char(1) NOT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1; diff --git metastore/scripts/upgrade/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql metastore/scripts/upgrade/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql deleted file mode 100644 index 4446b62955..0000000000 --- metastore/scripts/upgrade/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql +++ /dev/null @@ -1,13 +0,0 @@ -SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS ' '; - -SOURCE 041-HIVE-16556.mysql.sql; -SOURCE 042-HIVE-16575.mysql.sql; -SOURCE 043-HIVE-16922.mysql.sql; -SOURCE 044-HIVE-16997.mysql.sql; -SOURCE 045-HIVE-16886.mysql.sql; -SOURCE 046-HIVE-17566.mysql.sql; -SOURCE 048-HIVE-14498.mysql.sql; -SOURCE 049-HIVE-18489.mysql.sql; - -UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; -SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS ' '; diff --git metastore/scripts/upgrade/oracle/hive-txn-schema-3.0.0.oracle.sql metastore/scripts/upgrade/oracle/hive-txn-schema-3.0.0.oracle.sql deleted file mode 100644 index 5fcf037a35..0000000000 --- metastore/scripts/upgrade/oracle/hive-txn-schema-3.0.0.oracle.sql +++ /dev/null @@ -1,136 +0,0 @@ --- Licensed to the Apache Software Foundation (ASF) under one or more --- contributor license agreements. See the NOTICE file distributed with --- this work for additional information regarding copyright ownership. --- The ASF licenses this file to You under the Apache License, Version 2.0 --- (the License); you may not use this file except in compliance with --- the License. You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, software --- distributed under the License is distributed on an AS IS BASIS, --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --- See the License for the specific language governing permissions and --- limitations under the License. 
- --- --- Tables for transaction management --- - -CREATE TABLE TXNS ( - TXN_ID NUMBER(19) PRIMARY KEY, - TXN_STATE char(1) NOT NULL, - TXN_STARTED NUMBER(19) NOT NULL, - TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL, - TXN_USER varchar(128) NOT NULL, - TXN_HOST varchar(128) NOT NULL, - TXN_AGENT_INFO varchar2(128), - TXN_META_INFO varchar2(128), - TXN_HEARTBEAT_COUNT number(10) -) ROWDEPENDENCIES; - -CREATE TABLE TXN_COMPONENTS ( - TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID), - TC_DATABASE VARCHAR2(128) NOT NULL, - TC_TABLE VARCHAR2(256), - TC_PARTITION VARCHAR2(767) NULL, - TC_OPERATION_TYPE char(1) NOT NULL -) ROWDEPENDENCIES; - -CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); - -CREATE TABLE COMPLETED_TXN_COMPONENTS ( - CTC_TXNID NUMBER(19), - CTC_DATABASE varchar(128) NOT NULL, - CTC_TABLE varchar(128), - CTC_PARTITION varchar(767), - CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL -) ROWDEPENDENCIES; - -CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); - -CREATE TABLE NEXT_TXN_ID ( - NTXN_NEXT NUMBER(19) NOT NULL -); -INSERT INTO NEXT_TXN_ID VALUES(1); - -CREATE TABLE HIVE_LOCKS ( - HL_LOCK_EXT_ID NUMBER(19) NOT NULL, - HL_LOCK_INT_ID NUMBER(19) NOT NULL, - HL_TXNID NUMBER(19), - HL_DB VARCHAR2(128) NOT NULL, - HL_TABLE VARCHAR2(128), - HL_PARTITION VARCHAR2(767), - HL_LOCK_STATE CHAR(1) NOT NULL, - HL_LOCK_TYPE CHAR(1) NOT NULL, - HL_LAST_HEARTBEAT NUMBER(19) NOT NULL, - HL_ACQUIRED_AT NUMBER(19), - HL_USER varchar(128) NOT NULL, - HL_HOST varchar(128) NOT NULL, - HL_HEARTBEAT_COUNT number(10), - HL_AGENT_INFO varchar2(128), - HL_BLOCKEDBY_EXT_ID number(19), - HL_BLOCKEDBY_INT_ID number(19), - PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID) -) ROWDEPENDENCIES; - -CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID); - -CREATE TABLE NEXT_LOCK_ID ( - NL_NEXT NUMBER(19) NOT NULL -); -INSERT INTO NEXT_LOCK_ID VALUES(1); - -CREATE TABLE COMPACTION_QUEUE ( - CQ_ID NUMBER(19) PRIMARY KEY, - CQ_DATABASE varchar(128) NOT NULL, - CQ_TABLE varchar(128) NOT NULL, - CQ_PARTITION varchar(767), - CQ_STATE char(1) NOT NULL, - CQ_TYPE char(1) NOT NULL, - CQ_TBLPROPERTIES varchar(2048), - CQ_WORKER_ID varchar(128), - CQ_START NUMBER(19), - CQ_RUN_AS varchar(128), - CQ_HIGHEST_TXN_ID NUMBER(19), - CQ_META_INFO BLOB, - CQ_HADOOP_JOB_ID varchar2(32) -) ROWDEPENDENCIES; - -CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( - NCQ_NEXT NUMBER(19) NOT NULL -); -INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); - -CREATE TABLE COMPLETED_COMPACTIONS ( - CC_ID NUMBER(19) PRIMARY KEY, - CC_DATABASE varchar(128) NOT NULL, - CC_TABLE varchar(128) NOT NULL, - CC_PARTITION varchar(767), - CC_STATE char(1) NOT NULL, - CC_TYPE char(1) NOT NULL, - CC_TBLPROPERTIES varchar(2048), - CC_WORKER_ID varchar(128), - CC_START NUMBER(19), - CC_END NUMBER(19), - CC_RUN_AS varchar(128), - CC_HIGHEST_TXN_ID NUMBER(19), - CC_META_INFO BLOB, - CC_HADOOP_JOB_ID varchar2(32) -) ROWDEPENDENCIES; - -CREATE TABLE AUX_TABLE ( - MT_KEY1 varchar2(128) NOT NULL, - MT_KEY2 number(19) NOT NULL, - MT_COMMENT varchar2(255), - PRIMARY KEY(MT_KEY1, MT_KEY2) -); - -CREATE TABLE WRITE_SET ( - WS_DATABASE varchar2(128) NOT NULL, - WS_TABLE varchar2(128) NOT NULL, - WS_PARTITION varchar2(767), - WS_TXNID number(19) NOT NULL, - WS_COMMIT_ID number(19) NOT NULL, - WS_OPERATION_TYPE char(1) NOT NULL -); diff --git metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql deleted file mode 100644 index 
a4acb9eb30..0000000000 --- metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql +++ /dev/null @@ -1,13 +0,0 @@ -SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual; - -@041-HIVE-16556.oracle.sql; -@042-HIVE-16575.oracle.sql; -@043-HIVE-16922.oracle.sql; -@044-HIVE-16997.oracle.sql; -@045-HIVE-16886.oracle.sql; -@046-HIVE-17566.oracle.sql; -@048-HIVE-14498.oracle.sql; -@049-HIVE-18489.oracle.sql; - -UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; -SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual; diff --git metastore/scripts/upgrade/postgres/hive-txn-schema-3.0.0.postgres.sql metastore/scripts/upgrade/postgres/hive-txn-schema-3.0.0.postgres.sql deleted file mode 100644 index 3eb07300c6..0000000000 --- metastore/scripts/upgrade/postgres/hive-txn-schema-3.0.0.postgres.sql +++ /dev/null @@ -1,136 +0,0 @@ --- Licensed to the Apache Software Foundation (ASF) under one or more --- contributor license agreements. See the NOTICE file distributed with --- this work for additional information regarding copyright ownership. --- The ASF licenses this file to You under the Apache License, Version 2.0 --- (the "License"); you may not use this file except in compliance with --- the License. You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, software --- distributed under the License is distributed on an "AS IS" BASIS, --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --- See the License for the specific language governing permissions and --- limitations under the License. - --- --- Tables for transaction management --- - -CREATE TABLE TXNS ( - TXN_ID bigint PRIMARY KEY, - TXN_STATE char(1) NOT NULL, - TXN_STARTED bigint NOT NULL, - TXN_LAST_HEARTBEAT bigint NOT NULL, - TXN_USER varchar(128) NOT NULL, - TXN_HOST varchar(128) NOT NULL, - TXN_AGENT_INFO varchar(128), - TXN_META_INFO varchar(128), - TXN_HEARTBEAT_COUNT integer -); - -CREATE TABLE TXN_COMPONENTS ( - TC_TXNID bigint REFERENCES TXNS (TXN_ID), - TC_DATABASE varchar(128) NOT NULL, - TC_TABLE varchar(128), - TC_PARTITION varchar(767) DEFAULT NULL, - TC_OPERATION_TYPE char(1) NOT NULL -); - -CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS USING hash (TC_TXNID); - -CREATE TABLE COMPLETED_TXN_COMPONENTS ( - CTC_TXNID bigint, - CTC_DATABASE varchar(128) NOT NULL, - CTC_TABLE varchar(256), - CTC_PARTITION varchar(767), - CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL -); - -CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS USING btree (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); - -CREATE TABLE NEXT_TXN_ID ( - NTXN_NEXT bigint NOT NULL -); -INSERT INTO NEXT_TXN_ID VALUES(1); - -CREATE TABLE HIVE_LOCKS ( - HL_LOCK_EXT_ID bigint NOT NULL, - HL_LOCK_INT_ID bigint NOT NULL, - HL_TXNID bigint, - HL_DB varchar(128) NOT NULL, - HL_TABLE varchar(128), - HL_PARTITION varchar(767) DEFAULT NULL, - HL_LOCK_STATE char(1) NOT NULL, - HL_LOCK_TYPE char(1) NOT NULL, - HL_LAST_HEARTBEAT bigint NOT NULL, - HL_ACQUIRED_AT bigint, - HL_USER varchar(128) NOT NULL, - HL_HOST varchar(128) NOT NULL, - HL_HEARTBEAT_COUNT integer, - HL_AGENT_INFO varchar(128), - HL_BLOCKEDBY_EXT_ID bigint, - HL_BLOCKEDBY_INT_ID bigint, - PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID) -); - -CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID); - -CREATE TABLE NEXT_LOCK_ID ( - NL_NEXT bigint NOT NULL 
-); -INSERT INTO NEXT_LOCK_ID VALUES(1); - -CREATE TABLE COMPACTION_QUEUE ( - CQ_ID bigint PRIMARY KEY, - CQ_DATABASE varchar(128) NOT NULL, - CQ_TABLE varchar(128) NOT NULL, - CQ_PARTITION varchar(767), - CQ_STATE char(1) NOT NULL, - CQ_TYPE char(1) NOT NULL, - CQ_TBLPROPERTIES varchar(2048), - CQ_WORKER_ID varchar(128), - CQ_START bigint, - CQ_RUN_AS varchar(128), - CQ_HIGHEST_TXN_ID bigint, - CQ_META_INFO bytea, - CQ_HADOOP_JOB_ID varchar(32) -); - -CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( - NCQ_NEXT bigint NOT NULL -); -INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); - -CREATE TABLE COMPLETED_COMPACTIONS ( - CC_ID bigint PRIMARY KEY, - CC_DATABASE varchar(128) NOT NULL, - CC_TABLE varchar(128) NOT NULL, - CC_PARTITION varchar(767), - CC_STATE char(1) NOT NULL, - CC_TYPE char(1) NOT NULL, - CC_TBLPROPERTIES varchar(2048), - CC_WORKER_ID varchar(128), - CC_START bigint, - CC_END bigint, - CC_RUN_AS varchar(128), - CC_HIGHEST_TXN_ID bigint, - CC_META_INFO bytea, - CC_HADOOP_JOB_ID varchar(32) -); - -CREATE TABLE AUX_TABLE ( - MT_KEY1 varchar(128) NOT NULL, - MT_KEY2 bigint NOT NULL, - MT_COMMENT varchar(255), - PRIMARY KEY(MT_KEY1, MT_KEY2) -); - -CREATE TABLE WRITE_SET ( - WS_DATABASE varchar(128) NOT NULL, - WS_TABLE varchar(128) NOT NULL, - WS_PARTITION varchar(767), - WS_TXNID bigint NOT NULL, - WS_COMMIT_ID bigint NOT NULL, - WS_OPERATION_TYPE char(1) NOT NULL -); diff --git metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql deleted file mode 100644 index 44bee33e83..0000000000 --- metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql +++ /dev/null @@ -1,14 +0,0 @@ -SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0'; - -\i 040-HIVE-16556.postgres.sql; -\i 041-HIVE-16575.postgres.sql; -\i 042-HIVE-16922.postgres.sql; -\i 043-HIVE-16997.postgres.sql; -\i 044-HIVE-16886.postgres.sql; -\i 045-HIVE-17566.postgres.sql; -\i 047-HIVE-14498.postgres.sql; -\i 048-HIVE-18489.postgres.sql; - -UPDATE "VERSION" SET "SCHEMA_VERSION"='3.0.0', "VERSION_COMMENT"='Hive release version 3.0.0' where "VER_ID"=1; -SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0'; - diff --git packaging/src/main/assembly/bin.xml packaging/src/main/assembly/bin.xml index c66fa1edca..5d934ac53a 100644 --- packaging/src/main/assembly/bin.xml +++ packaging/src/main/assembly/bin.xml @@ -218,6 +218,14 @@ + ${project.parent.basedir}/standalone-metastore/src/main/sql + + **/* + + scripts/metastore/upgrade + + + ${project.parent.basedir}/conf *.template diff --git packaging/src/main/assembly/src.xml packaging/src/main/assembly/src.xml index 86269229d8..486fe5248e 100644 --- packaging/src/main/assembly/src.xml +++ packaging/src/main/assembly/src.xml @@ -61,6 +61,7 @@ beeline/**/* bin/**/* checkstyle/**/* + classification/**/* cli/**/* common/**/* conf/**/* @@ -95,8 +96,10 @@ shims/**/* spark-client/**/* storage-api/**/* + standalone-metastore/**/* testutils/**/* vector-code-gen/**/* + kryo-registrator/**/* / diff --git standalone-metastore/DEV-README standalone-metastore/DEV-README new file mode 100644 index 0000000000..446f5801c6 --- /dev/null +++ standalone-metastore/DEV-README @@ -0,0 +1,40 @@ +This file contains information for developers and testers. + +Testing metastore installation and upgrade against databases beyond Derby: +There are integration tests for testing installation and upgrade of the +metastore on MySQL (actually MariaDB is used), Oracle, Postgres, and SQLServer. 
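[A hedged illustration, not part of the patch: if you point a metastore at the MariaDB test container described at the end of this file, a minimal metastore-site.xml could look like the sketch below. The javax.jdo.option.* key names are an assumption drawn from the standard metastore connection configuration; the values are the ITestMysql defaults quoted later in this file.]

<configuration>
  <!-- Connection values below are the ITestMysql defaults quoted in this README. -->
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost:3306/hivedb</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>org.mariadb.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hiveuser</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>hivepassword</value>
  </property>
</configuration>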
+These tests are not run by default because they take several minutes each and +they require the developer to download the JDBC driver for Oracle. +They are run in the integration-test phase. + +Each ITest runs two tests, one that installs the latest version of the +database and one that installs the latest version minus one and then upgrades +the database. + +To run any of the tests you will need to explicitly turn on integration testing, +in the Oracle case specify the location of the JDBC driver, and optionally +specify which test you want to run. To run all of the tests, do: + +mvn verify -Ditest.jdbc.jars=_oracle_jar_path -DskipITests=false -Dtest=nosuch + +To run just one test, do: + +mvn verify -DskipITests=false -Dit.test=ITestMysql -Dtest=nosuch + +You can download the Oracle driver at +http://www.oracle.com/technetwork/database/features/jdbc/index-091264.html + +If you wish to use one of these containers to run your own tests against a +non-Derby version of the metastore, you can do that as well. You must specify +that only the install test be run (change -Dit.test=ITestMysql in the example +above to -Dit.test=ITestMysql#install) and tell it to leave the docker container +running by adding -Dmetastore.itest.no.stop.container=true. You will then need +to stop and remove the container yourself once you have finished. The container +is recreated for each run of the test, so you cannot rerun the test until you +have stopped and removed it. You can construct the connection values to put in +metastore-site.xml from the information in the appropriate ITest file (e.g., +from ITestMysql you can find that the JDBC URL is +"jdbc:mysql://localhost:3306/hivedb", the JDBC driver is +"org.mariadb.jdbc.Driver", and the password is "hivepassword"). The user is +always "hiveuser". + diff --git standalone-metastore/binary-package-licenses/NOTICE standalone-metastore/binary-package-licenses/NOTICE new file mode 100644 index 0000000000..76cce78d7d --- /dev/null +++ standalone-metastore/binary-package-licenses/NOTICE @@ -0,0 +1,4 @@ +Binary distributions of this software contain jars that are not licensed under the +Apache License 2.0. Additional licenses attached to these jars are contained in the +same directory as this NOTICE file. + diff --git standalone-metastore/binary-package-licenses/com.google.protobuf-LICENSE standalone-metastore/binary-package-licenses/com.google.protobuf-LICENSE new file mode 100644 index 0000000000..f028c82324 --- /dev/null +++ standalone-metastore/binary-package-licenses/com.google.protobuf-LICENSE @@ -0,0 +1,42 @@ +This license applies to all parts of Protocol Buffers except the following: + + - Atomicops support for generic gcc, located in + src/google/protobuf/stubs/atomicops_internals_generic_gcc.h. + This file is copyrighted by Red Hat Inc. + + - Atomicops support for AIX/POWER, located in + src/google/protobuf/stubs/atomicops_internals_power.h. + This file is copyrighted by Bloomberg Finance LP. + +Copyright 2014, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc.
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. diff --git standalone-metastore/binary-package-licenses/javax.transaction.transaction-api-LICENSE standalone-metastore/binary-package-licenses/javax.transaction.transaction-api-LICENSE new file mode 100644 index 0000000000..d7b7cfe857 --- /dev/null +++ standalone-metastore/binary-package-licenses/javax.transaction.transaction-api-LICENSE @@ -0,0 +1,128 @@ +COMMON DEVELOPMENT AND DISTRIBUTION LICENSE Version 1.0 (CDDL-1.0) +1. Definitions. + +1.1. Contributor means each individual or entity that creates or contributes to the creation of Modifications. + +1.2. Contributor Version means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor. + +1.3. Covered Software means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof. + +1.4. Executable means the Covered Software in any form other than Source Code. + +1.5. Initial Developer means the individual or entity that first makes Original Software available under this License. + +1.6. Larger Work means a work which combines Covered Software or portions thereof with code not governed by the terms of this License. + +1.7. License means this document. + +1.8. Licensable means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. + +1.9. Modifications means the Source Code and Executable form of any of the following: + +A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications; + +B. Any new file that contains any part of the Original Software or previous Modification; or + +C. Any new file that is contributed or otherwise made available under the terms of this License. + +1.10. Original Software means the Source Code and Executable form of computer software code that is originally released under this License. + +1.11. Patent Claims means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. + +1.12. 
Source Code means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code. + +1.13. You (or Your) means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, You includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, control means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. + +2. License Grants. + +2.1. The Initial Developer Grant. + +Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and + +(b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof). + +(c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License. + +(d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices. + +2.2. Contributor Grant. + +Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and + +(b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). + +(c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party. 
+ +(d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor. + +3. Distribution Obligations. + +3.1. Availability of Source Code. + +Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange. + +3.2. Modifications. + +The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License. + +3.3. Required Notices. + +You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer. + +3.4. Application of Additional Terms. + +You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. + +3.5. Distribution of Executable Versions. + +You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipients rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. 
You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. + +3.6. Larger Works. + +You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software. + +4. Versions of the License. + +4.1. New Versions. + +Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License. + +4.2. Effect of New Versions. + +You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward. + +4.3. Modified Versions. + +When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License. + +5. DISCLAIMER OF WARRANTY. + +COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN AS IS BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + +6. TERMINATION. + +6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. + +6.2. 
If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as Participant) alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant. + +6.3. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination. + +7. LIMITATION OF LIABILITY. + +UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTYS NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. + +8. U.S. GOVERNMENT END USERS. + +The Covered Software is a commercial item, as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of commercial computer software (as that term is defined at 48 C.F.R. 252.227-7014(a)(1)) and commercial computer software documentation as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License. + +9. MISCELLANEOUS. + +This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdictions conflict-of-law provisions. 
Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + +As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. diff --git standalone-metastore/binary-package-licenses/javolution-LICENSE standalone-metastore/binary-package-licenses/javolution-LICENSE new file mode 100644 index 0000000000..bd6788d401 --- /dev/null +++ standalone-metastore/binary-package-licenses/javolution-LICENSE @@ -0,0 +1,25 @@ +Javolution - Java(tm) Solution for Real-Time and Embedded Systems +Copyright (c) 2012, Javolution (http://javolution.org/) +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git standalone-metastore/binary-package-licenses/jline-LICENSE standalone-metastore/binary-package-licenses/jline-LICENSE new file mode 100644 index 0000000000..246f54f736 --- /dev/null +++ standalone-metastore/binary-package-licenses/jline-LICENSE @@ -0,0 +1,32 @@ +Copyright (c) 2002-2006, Marc Prud'hommeaux +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following +conditions are met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with +the distribution. + +Neither the name of JLine nor the names of its contributors +may be used to endorse or promote products derived from this +software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, +OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED +AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. diff --git standalone-metastore/binary-package-licenses/org.antlr-LICENSE standalone-metastore/binary-package-licenses/org.antlr-LICENSE new file mode 100644 index 0000000000..f6d28b751c --- /dev/null +++ standalone-metastore/binary-package-licenses/org.antlr-LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2003-2008, Terence Parr +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. Redistributions +in binary form must reproduce the above copyright notice, this list of +conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. Neither the name of +the author nor the names of its contributors may be used to endorse or +promote products derived from this software without specific prior +written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git standalone-metastore/binary-package-licenses/sqlline-LICENSE standalone-metastore/binary-package-licenses/sqlline-LICENSE new file mode 100644 index 0000000000..47e039164c --- /dev/null +++ standalone-metastore/binary-package-licenses/sqlline-LICENSE @@ -0,0 +1,33 @@ +Copyright (c) 2002,2003,2004,2005,2006 Marc Prud'hommeaux +All rights reserved. + + +Redistribution and use in source and binary forms, +with or without modification, are permitted provided +that the following conditions are met: + +Redistributions of source code must retain the above +copyright notice, this list of conditions and the following +disclaimer. +Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials +provided with the distribution. +Neither the name of the nor the names +of its contributors may be used to endorse or promote +products derived from this software without specific +prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS +AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
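[A hedged note on the pom.xml diff that follows: it adds sqlline plus test-scoped JDBC drivers, binds the maven-assembly-plugin (src/assembly/bin.xml and src/assembly/src.xml descriptors) to the package phase, and wires the maven-failsafe-plugin to the new ITests. Under those changes, building the tarballs should reduce to a single command; the output names below are inferred from the finalName and descriptor ids in the diff, so treat this as a sketch.]

# Run from the standalone-metastore directory; builds the bin and src tarballs.
mvn clean package -DskipTests
# Expected under target/ (names inferred from finalName plus assembly ids):
#   apache-hive-metastore-<version>-bin.tar.gz
#   apache-hive-metastore-<version>-src.tar.gz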
diff --git standalone-metastore/pom.xml standalone-metastore/pom.xml index 58ed741712..df769f5afe 100644 --- standalone-metastore/pom.xml +++ standalone-metastore/pom.xml @@ -38,6 +38,8 @@ 1.8 false ${settings.localRepository} + 2.3 + 1.6.0 ${project.basedir}/src/test/resources @@ -45,6 +47,8 @@ ${project.build.directory}/warehouse file:// 1 + true + set-this-to-colon-separated-full-path-list-of-jars-to-run-integration-tests 1.0b3 @@ -75,6 +79,7 @@ 2.8.2 1.10.19 2.5.0 + 1.3.0 3.0.0-SNAPSHOT @@ -277,10 +282,21 @@ 1.4.0 test + + sqlline + sqlline + ${sqlline.version} + + com.microsoft.sqlserver + mssql-jdbc + 6.2.1.jre8 + test + + junit junit ${junit.version} @@ -292,6 +308,20 @@ ${mockito-all.version} test + + + org.mariadb.jdbc + mariadb-java-client + 2.2.0 + test + + + org.postgresql + postgresql + 9.3-1102-jdbc41 + test + @@ -427,6 +457,11 @@ maven-checkstyle-plugin ${maven.checkstyle.plugin.version} + + org.codehaus.mojo + exec-maven-plugin + ${maven.exec.plugin.version} + @@ -467,6 +502,21 @@ run + + setup-metastore-scripts + process-test-resources + + run + + + + + + + + + + @@ -498,11 +548,62 @@ - org.apache.maven.plugins + maven-assembly-plugin + ${maven.assembly.plugin.version} + + + assemble + package + + single + + + apache-hive-metastore-${project.version} + + src/assembly/bin.xml + src/assembly/src.xml + + gnu + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + 2.20.1 + + + + integration-test + verify + + + + + true + false + -Xmx2048m + false + + true + ${test.tmp.dir} + ${test.tmp.dir} + true + + + ${log4j.conf.dir} + ${itest.jdbc.jars} + + ${skipITests} + + + + org.apache.maven.plugins maven-surefire-plugin 2.16 @@ -583,6 +684,27 @@ + org.codehaus.mojo + exec-maven-plugin + + + prepare-package + + exec + + + + + java + + -classpath + + org.apache.hadoop.hive.metastore.conf.ConfTemplatePrinter + ${project.build.directory}/generated-sources/conf/metastore-site.xml.template + + + + org.datanucleus datanucleus-maven-plugin 4.0.5 diff --git standalone-metastore/src/assembly/bin.xml standalone-metastore/src/assembly/bin.xml new file mode 100644 index 0000000000..81912d78da --- /dev/null +++ standalone-metastore/src/assembly/bin.xml @@ -0,0 +1,136 @@ + + + + + bin + + + dir + tar.gz + + + apache-hive-metastore-${project.version}-bin + + + + lib + false + true + true + true + + org.apache.hadoop:* + org.slf4j:* + log4j:* + + + + + + + ${project.basedir} + + target/** + .classpath + .project + .settings/** + lib/** + + + + README.txt + LICENSE + NOTICE + + / + + + + ${project.basedir}/binary-package-licenses + + /* + + + /README + + binary-package-licenses + + + + 755 + ${project.basedir}/src/main/scripts + + base + schematool + start-metastore + metastore-config.sh + ext/**/* + + bin + + + + ${project.basedir}/src/main/sql + + **/* + + scripts/metastore/upgrade + + + + ${project.basedir}/src/gen/thrift/gen-php + + **/* + + lib/php/packages/hive_metastore + + + + ${project.basedir}/src/gen/thrift/gen-py/hive_metastore + 755 + + **/* + + lib/py/hive_metastore + + + + ${project.basedir}/src/main/resources/ + 644 + + metastore-site.xml + metastore-log4j2.properties + + conf + + + + + + ${project.build.directory}/generated-sources/conf/metastore-site.xml.template + conf + + + + + diff --git standalone-metastore/src/assembly/src.xml standalone-metastore/src/assembly/src.xml new file mode 100644 index 0000000000..a2405443ea --- /dev/null +++ standalone-metastore/src/assembly/src.xml @@ -0,0 +1,53 @@ + + + + + src + + + tar.gz + + + apache-hive-metastore-${project.version}-src + 
+ + + ${project.basedir} + + + target/** + + + + .checkstyle + .gitattributes + .gitignore + LICENSE + NOTICE + pom.xml + src/**/* + + / + + + diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java index be89f9b56b..ed4a2efb4b 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java @@ -55,6 +55,13 @@ String generateInitFileName(String toVersion) throws HiveMetaException; /** + * Get SQL script that will create the user and database for Metastore to use. + * @return filename + * @throws HiveMetaException if something goes wrong. + */ + String getCreateUserScript() throws HiveMetaException; + + /** * Find the directory of metastore scripts * * @return the path of directory where the sql scripts are diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java index 0c368555c6..d6ef53c6d7 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java @@ -39,12 +39,14 @@ public class MetaStoreSchemaInfo implements IMetaStoreSchemaInfo { protected static final String UPGRADE_FILE_PREFIX = "upgrade-"; - private static final String INIT_FILE_PREFIX = "hive-schema-"; - private static final String VERSION_UPGRADE_LIST = "upgrade.order"; - private static final String PRE_UPGRADE_PREFIX = "pre-"; - protected final String dbType; + protected static final String INIT_FILE_PREFIX = "hive-schema-"; + protected static final String VERSION_UPGRADE_LIST = "upgrade.order"; + protected static final String PRE_UPGRADE_PREFIX = "pre-"; + protected static final String CREATE_USER_PREFIX = "create-user"; + private String[] hiveSchemaVersions; - private final String hiveHome; + private final String metastoreHome; + protected final String dbType; // Some version upgrades often don't change schema. So they are equivalent to // a version @@ -57,8 +59,8 @@ "1.2.1", "1.2.0" ); - public MetaStoreSchemaInfo(String hiveHome, String dbType) throws HiveMetaException { - this.hiveHome = hiveHome; + public MetaStoreSchemaInfo(String metastoreHome, String dbType) throws HiveMetaException { + this.metastoreHome = metastoreHome; this.dbType = dbType; } @@ -137,13 +139,24 @@ public String generateInitFileName(String toVersion) throws HiveMetaException { return initScriptName; } + @Override + public String getCreateUserScript() throws HiveMetaException { + String createScript = CREATE_USER_PREFIX + "." 
+ dbType + SQL_FILE_EXTENSION; + // check if the file exists + if (!(new File(getMetaStoreScriptDir() + File.separatorChar + + createScript).exists())) { + throw new HiveMetaException("Unable to find create user file, expected: " + createScript); + } + return createScript; + } + /** * Find the directory of metastore scripts * @return */ @Override public String getMetaStoreScriptDir() { - return hiveHome + File.separatorChar + + return metastoreHome + File.separatorChar + "scripts" + File.separatorChar + "metastore" + File.separatorChar + "upgrade" + File.separatorChar + dbType; } @@ -209,7 +222,7 @@ public String getMetaStoreSchemaVersion(MetaStoreConnectionInfo connectionInfo) throws HiveMetaException { String versionQuery; boolean needsQuotedIdentifier = - HiveSchemaHelper.getDbCommandParser(connectionInfo.getDbType()).needsQuotedIdentifier(); + HiveSchemaHelper.getDbCommandParser(connectionInfo.getDbType(), false).needsQuotedIdentifier(); if (needsQuotedIdentifier) { versionQuery = "select t.\"SCHEMA_VERSION\" from \"VERSION\" t"; } else { diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/ConfTemplatePrinter.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/ConfTemplatePrinter.java new file mode 100644 index 0000000000..f57e2ceacb --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/ConfTemplatePrinter.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.conf; + +import org.w3c.dom.Document; +import org.w3c.dom.Element; +import org.w3c.dom.Text; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.transform.OutputKeys; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerException; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.stream.StreamResult; +import java.io.File; + +public class ConfTemplatePrinter { + + private Document generateTemplate() throws ParserConfigurationException { + DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); + DocumentBuilder docBuilder = dbf.newDocumentBuilder(); + Document doc = docBuilder.newDocument(); + doc.appendChild(doc.createProcessingInstruction( + "xml-stylesheet", "type=\"text/xsl\" href=\"configuration.xsl\"")); + + doc.appendChild(doc.createComment("\n" + + " Licensed to the Apache Software Foundation (ASF) under one or more\n" + + " contributor license agreements. See the NOTICE file distributed with\n" + + " this work for additional information regarding copyright ownership.\n" + + " The ASF licenses this file to You under the Apache License, Version 2.0\n" + + " (the \"License\"); you may not use this file except in compliance with\n" + + " the License. You may obtain a copy of the License at\n" + + "\n" + + " http://www.apache.org/licenses/LICENSE-2.0\n" + + "\n" + + " Unless required by applicable law or agreed to in writing, software\n" + + " distributed under the License is distributed on an \"AS IS\" BASIS,\n" + + " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" + + " See the License for the specific language governing permissions and\n" + + " limitations under the License.\n")); + + Element root = doc.createElement("configuration"); + doc.appendChild(root); + + root.appendChild(doc.createComment( + " WARNING!!! This file is auto generated for documentation purposes ONLY! ")); + root.appendChild(doc.createComment( + " WARNING!!! Any changes you make to this file will be ignored by the metastore. ")); + root.appendChild(doc.createComment( + " WARNING!!! You must make your changes in metastore-site.xml instead. 
")); + + root.appendChild(doc.createComment(" Metastore Execution Parameters ")); + + root.appendChild(doc.createComment("================================")); + root.appendChild(doc.createComment("All time unit values have a time unit abbreviation suffix")); + root.appendChild(doc.createComment("Any time value can take any of the units")); + root.appendChild(doc.createComment("d = day")); + root.appendChild(doc.createComment("h = hour")); + root.appendChild(doc.createComment("m = minute")); + root.appendChild(doc.createComment("s = second")); + root.appendChild(doc.createComment("ms = millisecond")); + root.appendChild(doc.createComment("us = microsecond")); + root.appendChild(doc.createComment("ns = nanosecond")); + root.appendChild(doc.createComment("================================")); + + for (MetastoreConf.ConfVars confVars : MetastoreConf.ConfVars.values()) { + Element property = appendElement(root, "property", null); + appendElement(property, "name", confVars.getVarname()); + appendElement(property, "value", confVars.getDefaultVal().toString()); + appendElement(property, "description", normalize(confVars.getDescription())); + // A blank separator line between properties would improve readability here, but the indenting Transformer controls the layout. + } + return doc; + + } + + private String normalize(String description) { + int index = description.indexOf('\n'); + if (index < 0) { + return description; + } + int prev = 0; + StringBuilder builder = new StringBuilder(description.length() << 1); + for (; index > 0; index = description.indexOf('\n', prev = index + 1)) { + builder.append("\n ").append(description.substring(prev, index)); + } + if (prev < description.length()) { + builder.append("\n ").append(description.substring(prev)); + } + builder.append("\n "); + return builder.toString(); + } + + private void writeToFile(File template, Document document) throws TransformerException { + Transformer transformer = TransformerFactory.newInstance().newTransformer(); + transformer.setOutputProperty(OutputKeys.INDENT, "yes"); + transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2"); + DOMSource source = new DOMSource(document); + StreamResult result = new StreamResult(template); + transformer.transform(source, result); + } + + private Element appendElement(Element parent, String name, String text) { + Document document = parent.getOwnerDocument(); + Element child = document.createElement(name); + parent.appendChild(child); + if (text != null) { + Text textNode = document.createTextNode(text); + child.appendChild(textNode); + } + return child; + } + + private void print(String fileName) throws ParserConfigurationException, TransformerException { + Document doc = generateTemplate(); + File file = new File(fileName); + File dir = file.getParentFile(); + // Make certain the target directory exists. 
+ dir.mkdirs(); + writeToFile(file, doc); + } + + public static void main(String[] args) throws Exception { + if (args.length != 1) { + String msg = "Usage: ConfTemplatePrinter filename"; + System.err.println(msg); + throw new RuntimeException(msg); + } + ConfTemplatePrinter printer = new ConfTemplatePrinter(); + printer.print(args[0]); + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index 5b8e5ca8a0..699a64948f 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -96,7 +96,16 @@ private TimeValue(long val, TimeUnit unit) { @Override public String toString() { - return Long.toString(unit.toNanos(val)) + "ns"; + switch (unit) { + case NANOSECONDS: return Long.toString(val) + "ns"; + case MICROSECONDS: return Long.toString(val) + "us"; + case MILLISECONDS: return Long.toString(val) + "ms"; + case SECONDS: return Long.toString(val) + "s"; + case MINUTES: return Long.toString(val) + "m"; + case HOURS: return Long.toString(val) + "h"; + case DAYS: return Long.toString(val) + "d"; + } + throw new RuntimeException("Unknown time unit " + unit); } } @@ -537,7 +546,7 @@ public static ConfVars getMetaConf(String name) { "The default value \"-1\" means no limit."), LOG4J_FILE("metastore.log4j.file", "hive.log4j.file", "", "Hive log4j configuration file.\n" + - "If the property is not set, then logging will be initialized using hive-log4j2.properties found on the classpath.\n" + + "If the property is not set, then logging will be initialized using metastore-log4j2.properties found on the classpath.\n" + "If the property is set, the value must be a valid URI (java.net.URI, e.g. 
\"file:///tmp/my-logging.xml\"), \n" + "which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."), MANAGER_FACTORY_CLASS("javax.jdo.PersistenceManagerFactoryClass", @@ -893,81 +902,91 @@ public static ConfVars getMetaConf(String name) { private final Object defaultVal; private final Validator validator; private final boolean caseSensitive; + private final String description; - ConfVars(String varname, String hiveName, String defaultVal, String comment) { + ConfVars(String varname, String hiveName, String defaultVal, String description) { this.varname = varname; this.hiveName = hiveName; this.defaultVal = defaultVal; validator = null; caseSensitive = false; + this.description = description; } ConfVars(String varname, String hiveName, String defaultVal, Validator validator, - String comment) { + String description) { this.varname = varname; this.hiveName = hiveName; this.defaultVal = defaultVal; this.validator = validator; caseSensitive = false; + this.description = description; } ConfVars(String varname, String hiveName, String defaultVal, boolean caseSensitive, - String comment) { + String description) { this.varname = varname; this.hiveName = hiveName; this.defaultVal = defaultVal; validator = null; this.caseSensitive = caseSensitive; + this.description = description; } - ConfVars(String varname, String hiveName, long defaultVal, String comment) { + ConfVars(String varname, String hiveName, long defaultVal, String description) { this.varname = varname; this.hiveName = hiveName; this.defaultVal = defaultVal; validator = null; caseSensitive = false; + this.description = description; } ConfVars(String varname, String hiveName, long defaultVal, Validator validator, - String comment) { + String description) { this.varname = varname; this.hiveName = hiveName; this.defaultVal = defaultVal; this.validator = validator; caseSensitive = false; + this.description = description; } - ConfVars(String varname, String hiveName, boolean defaultVal, String comment) { + ConfVars(String varname, String hiveName, boolean defaultVal, String description) { this.varname = varname; this.hiveName = hiveName; this.defaultVal = defaultVal; validator = null; caseSensitive = false; + this.description = description; } - ConfVars(String varname, String hiveName, double defaultVal, String comment) { + ConfVars(String varname, String hiveName, double defaultVal, String description) { this.varname = varname; this.hiveName = hiveName; this.defaultVal = defaultVal; validator = null; caseSensitive = false; + this.description = description; } - ConfVars(String varname, String hiveName, long defaultVal, TimeUnit unit, String comment) { + ConfVars(String varname, String hiveName, long defaultVal, TimeUnit unit, String description) { this.varname = varname; this.hiveName = hiveName; this.defaultVal = new TimeValue(defaultVal, unit); validator = new Validator.TimeValidator(unit); caseSensitive = false; + this.description = description; } ConfVars(String varname, String hiveName, long defaultVal, TimeUnit unit, - Validator validator, String comment) { + Validator validator, String description) { this.varname = varname; this.hiveName = hiveName; this.defaultVal = new TimeValue(defaultVal, unit); this.validator = validator; caseSensitive = false; + this.description = description; } public void validate(String value) throws IllegalArgumentException { @@ -1008,6 +1027,10 @@ public Object getDefaultVal() { return defaultVal; } + public String getDescription() { + return description; + } + @Override public 
String toString() { return varname; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java index 08a3af5e02..80b9af268a 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java @@ -22,6 +22,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.HiveMetaException; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.BufferedReader; import java.io.File; @@ -34,6 +36,8 @@ import java.util.List; public class HiveSchemaHelper { + private static final Logger LOG = LoggerFactory.getLogger(HiveSchemaHelper.class); + public static final String DB_DERBY = "derby"; public static final String DB_HIVE = "hive"; public static final String DB_MSSQL = "mssql"; @@ -56,14 +60,15 @@ public static Connection getConnectionToMetastore(String userName, Configuration conf) throws HiveMetaException { try { - url = url == null ? getValidConfVar( - MetastoreConf.ConfVars.CONNECTURLKEY, conf) : url; - driver = driver == null ? getValidConfVar( - MetastoreConf.ConfVars.CONNECTION_DRIVER, conf) : driver; + url = url == null ? MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECTURLKEY) : url; + driver = driver == null ? MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECTION_DRIVER) : driver; if (printInfo) { - System.out.println("Metastore connection URL:\t " + url); - System.out.println("Metastore Connection Driver :\t " + driver); - System.out.println("Metastore connection User:\t " + userName); + logAndPrintToStdout("Metastore connection URL:\t " + url); + logAndPrintToStdout("Metastore Connection Driver :\t " + driver); + logAndPrintToStdout("Metastore connection User:\t " + userName); + if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST)) { + logAndPrintToStdout("Metastore connection Password:\t " + password); + } } if ((userName == null) || userName.isEmpty()) { throw new HiveMetaException("UserName empty "); @@ -74,11 +79,11 @@ public static Connection getConnectionToMetastore(String userName, // Connect using the JDBC URL and user/pass from conf return DriverManager.getConnection(url, userName, password); - } catch (IOException e) { - throw new HiveMetaException("Failed to get schema version.", e); } catch (SQLException e) { - throw new HiveMetaException("Failed to get schema version.", e); + LOG.error("Failed to connect", e); + throw new HiveMetaException("Failed to connect.", e); } catch (ClassNotFoundException e) { + LOG.error("Unable to find driver class", e); throw new HiveMetaException("Failed to load driver", e); } } @@ -97,6 +102,11 @@ public static String getValidConfVar(MetastoreConf.ConfVars confVar, Configurati return confVarStr.trim(); } + private static void logAndPrintToStdout(String msg) { + LOG.info(msg); + System.out.println(msg); + } + public interface NestedScriptParser { enum CommandType { @@ -201,13 +211,17 @@ String buildCommand(String scriptDir, String scriptFile, boolean fixQuotes) private String msUsername; private String msPassword; private Configuration conf; + // Depending on whether we are using beeline or sqlline the line endings have to be handled + // differently. 
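A minimal sketch of how call sites pick up the new usingSqlLine flag (the script directory and file name below are illustrative only):

    // The added boolean selects the statement-termination style: false keeps the
    // old beeline behavior, true makes buildCommand() re-append the ';' that
    // statement splitting strips, since SqlLine expects terminated statements.
    NestedScriptParser parser =
        HiveSchemaHelper.getDbCommandParser("derby", /* usingSqlLine */ true);
    String flattened = parser.buildCommand("/path/to/scripts/derby",   // illustrative
        "hive-schema-3.0.0.derby.sql");                                // illustrative
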
+ private final boolean usingSqlLine; public AbstractCommandParser(String dbOpts, String msUsername, String msPassword, - Configuration conf) { + Configuration conf, boolean usingSqlLine) { setDbOpts(dbOpts); this.msUsername = msUsername; this.msPassword = msPassword; this.conf = conf; + this.usingSqlLine = usingSqlLine; } @Override @@ -300,6 +314,7 @@ public String buildCommand( // Now we have a complete statement, process it // write the line to buffer sb.append(currentCommand); + if (usingSqlLine) sb.append(";"); sb.append(System.getProperty("line.separator")); } } @@ -339,8 +354,8 @@ protected Configuration getConf() { private static final String DERBY_NESTING_TOKEN = "RUN"; public DerbyCommandParser(String dbOpts, String msUsername, String msPassword, - Configuration conf) { - super(dbOpts, msUsername, msPassword, conf); + Configuration conf, boolean usingSqlLine) { + super(dbOpts, msUsername, msPassword, conf, usingSqlLine); } @Override @@ -369,9 +384,9 @@ public boolean isNestedScript(String dbCommand) { private final NestedScriptParser nestedDbCommandParser; public HiveCommandParser(String dbOpts, String msUsername, String msPassword, - Configuration conf, String metaDbType) { - super(dbOpts, msUsername, msPassword, conf); - nestedDbCommandParser = getDbCommandParser(metaDbType); + Configuration conf, String metaDbType, boolean usingSqlLine) { + super(dbOpts, msUsername, msPassword, conf, usingSqlLine); + nestedDbCommandParser = getDbCommandParser(metaDbType, usingSqlLine); } @Override @@ -405,8 +420,8 @@ public boolean isNestedScript(String dbCommand) { private String delimiter = DEFAULT_DELIMITER; public MySqlCommandParser(String dbOpts, String msUsername, String msPassword, - Configuration conf) { - super(dbOpts, msUsername, msPassword, conf); + Configuration conf, boolean usingSqlLine) { + super(dbOpts, msUsername, msPassword, conf, usingSqlLine); } @Override @@ -471,8 +486,8 @@ public String cleanseCommand(String dbCommand) { public static final String POSTGRES_SKIP_STANDARD_STRINGS_DBOPT = "postgres.filter.81"; public PostgresCommandParser(String dbOpts, String msUsername, String msPassword, - Configuration conf) { - super(dbOpts, msUsername, msPassword, conf); + Configuration conf, boolean usingSqlLine) { + super(dbOpts, msUsername, msPassword, conf, usingSqlLine); } @Override @@ -514,8 +529,8 @@ public boolean isNonExecCommand(String dbCommand) { private static final String ORACLE_NESTING_TOKEN = "@"; public OracleCommandParser(String dbOpts, String msUsername, String msPassword, - Configuration conf) { - super(dbOpts, msUsername, msPassword, conf); + Configuration conf, boolean usingSqlLine) { + super(dbOpts, msUsername, msPassword, conf, usingSqlLine); } @Override @@ -538,8 +553,8 @@ public boolean isNestedScript(String dbCommand) { private static final String MSSQL_NESTING_TOKEN = ":r"; public MSSQLCommandParser(String dbOpts, String msUsername, String msPassword, - Configuration conf) { - super(dbOpts, msUsername, msPassword, conf); + Configuration conf, boolean usingSqlLine) { + super(dbOpts, msUsername, msPassword, conf, usingSqlLine); } @Override @@ -557,29 +572,29 @@ public boolean isNestedScript(String dbCommand) { } } - public static NestedScriptParser getDbCommandParser(String dbName) { - return getDbCommandParser(dbName, null); + public static NestedScriptParser getDbCommandParser(String dbName, boolean usingSqlLine) { + return getDbCommandParser(dbName, null, usingSqlLine); } - public static NestedScriptParser getDbCommandParser(String dbName, String 
metaDbName) { - return getDbCommandParser(dbName, null, null, null, null, metaDbName); + public static NestedScriptParser getDbCommandParser(String dbName, String metaDbName, boolean usingSqlLine) { + return getDbCommandParser(dbName, null, null, null, null, metaDbName, usingSqlLine); } public static NestedScriptParser getDbCommandParser(String dbName, String dbOpts, String msUsername, String msPassword, - Configuration conf, String metaDbType) { + Configuration conf, String metaDbType, boolean usingSqlLine) { if (dbName.equalsIgnoreCase(DB_DERBY)) { - return new DerbyCommandParser(dbOpts, msUsername, msPassword, conf); + return new DerbyCommandParser(dbOpts, msUsername, msPassword, conf, usingSqlLine); } else if (dbName.equalsIgnoreCase(DB_HIVE)) { - return new HiveCommandParser(dbOpts, msUsername, msPassword, conf, metaDbType); + return new HiveCommandParser(dbOpts, msUsername, msPassword, conf, metaDbType, usingSqlLine); } else if (dbName.equalsIgnoreCase(DB_MSSQL)) { - return new MSSQLCommandParser(dbOpts, msUsername, msPassword, conf); + return new MSSQLCommandParser(dbOpts, msUsername, msPassword, conf, usingSqlLine); } else if (dbName.equalsIgnoreCase(DB_MYSQL)) { - return new MySqlCommandParser(dbOpts, msUsername, msPassword, conf); + return new MySqlCommandParser(dbOpts, msUsername, msPassword, conf, usingSqlLine); } else if (dbName.equalsIgnoreCase(DB_POSTGRACE)) { - return new PostgresCommandParser(dbOpts, msUsername, msPassword, conf); + return new PostgresCommandParser(dbOpts, msUsername, msPassword, conf, usingSqlLine); } else if (dbName.equalsIgnoreCase(DB_ORACLE)) { - return new OracleCommandParser(dbOpts, msUsername, msPassword, conf); + return new OracleCommandParser(dbOpts, msUsername, msPassword, conf, usingSqlLine); } else { throw new IllegalArgumentException("Unknown dbType " + dbName); } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/MetastoreSchemaTool.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/MetastoreSchemaTool.java new file mode 100644 index 0000000000..eef034a44f --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/MetastoreSchemaTool.java @@ -0,0 +1,1308 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.metastore.tools; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.GnuParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.Option; +import org.apache.commons.cli.OptionBuilder; +import org.apache.commons.cli.OptionGroup; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.io.output.NullOutputStream; +import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.HiveMetaException; +import org.apache.hadoop.hive.metastore.IMetaStoreSchemaInfo; +import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfoFactory; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.MetaStoreConnectionInfo; +import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.NestedScriptParser; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.ImmutableMap; +import sqlline.SqlLine; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.io.OutputStream; +import java.io.PrintStream; +import java.net.URI; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class MetastoreSchemaTool { + private static final Logger LOG = LoggerFactory.getLogger(MetastoreSchemaTool.class); + private static final String PASSWD_MASK = "[passwd stripped]"; + + @VisibleForTesting + public static String homeDir; + + private String userName = null; + private String passWord = null; + private boolean dryRun = false; + private boolean verbose = false; + private String dbOpts = null; + private String url = null; + private String driver = null; + private URI[] validationServers = null; // The list of servers on which the database/partition/table may be located + private String hiveUser; // Hive username, for use when creating the user, not for connecting + private String hivePasswd; // Hive password, for use when creating the user, not for connecting + private String hiveDb; // Hive database, for use when creating the user, not for connecting + private final Configuration conf; + private final String dbType; + private final IMetaStoreSchemaInfo metaStoreSchemaInfo; + private boolean needsQuotedIdentifier; + + private static String findHomeDir() { + // If METASTORE_HOME is set, use it, else use HIVE_HOME for backwards compatibility. + homeDir = homeDir == null ? System.getenv("METASTORE_HOME") : homeDir; + return homeDir == null ? 
System.getenv("HIVE_HOME") : homeDir; + } + + private MetastoreSchemaTool(String dbType) throws HiveMetaException { + this(findHomeDir(), MetastoreConf.newMetastoreConf(), dbType); + } + + MetastoreSchemaTool(String metastoreHome, Configuration conf, String dbType) + throws HiveMetaException { + if (metastoreHome == null || metastoreHome.isEmpty()) { + throw new HiveMetaException("No Metastore home directory provided"); + } + this.conf = conf; + this.dbType = dbType; + this.needsQuotedIdentifier = getDbCommandParser(dbType).needsQuotedIdentifier(); + this.metaStoreSchemaInfo = MetaStoreSchemaInfoFactory.get(conf, metastoreHome, dbType); + } + + Configuration getConf() { + return conf; + } + + void setUrl(String url) { + this.url = url; + } + + void setDriver(String driver) { + this.driver = driver; + } + + void setUserName(String userName) { + this.userName = userName; + } + + void setPassWord(String passWord) { + this.passWord = passWord; + } + + void setDryRun(boolean dryRun) { + this.dryRun = dryRun; + } + + void setVerbose(boolean verbose) { + this.verbose = verbose; + } + + private void setDbOpts(String dbOpts) { + this.dbOpts = dbOpts; + } + + private void setValidationServers(String servers) { + if(StringUtils.isNotEmpty(servers)) { + String[] strServers = servers.split(","); + this.validationServers = new URI[strServers.length]; + for (int i = 0; i < validationServers.length; i++) { + validationServers[i] = new Path(strServers[i]).toUri(); + } + } + } + + private void setHiveUser(String hiveUser) { + this.hiveUser = hiveUser; + } + + private void setHivePasswd(String hivePasswd) { + this.hivePasswd = hivePasswd; + } + + private void setHiveDb(String hiveDb) { + this.hiveDb = hiveDb; + } + + private static int usage(Options cmdLineOptions) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("schemaTool", cmdLineOptions); + return 1; + } + + Connection getConnectionToMetastore(boolean printInfo) + throws HiveMetaException { + return HiveSchemaHelper.getConnectionToMetastore(userName, + passWord, url, driver, printInfo, conf); + } + + private NestedScriptParser getDbCommandParser(String dbType) { + return HiveSchemaHelper.getDbCommandParser(dbType, dbOpts, userName, + passWord, conf, null, true); + } + + /*** + * Print Hive version and schema version + */ + private void showInfo() throws HiveMetaException { + String hiveVersion = metaStoreSchemaInfo.getHiveSchemaVersion(); + String dbVersion = metaStoreSchemaInfo.getMetaStoreSchemaVersion(getConnectionInfo(true)); + System.out.println("Hive distribution version:\t " + hiveVersion); + System.out.println("Metastore schema version:\t " + dbVersion); + assertCompatibleVersion(hiveVersion, dbVersion); + } + + boolean validateLocations(Connection conn, URI[] defaultServers) throws HiveMetaException { + System.out.println("Validating DFS locations"); + boolean rtn; + rtn = checkMetaStoreDBLocation(conn, defaultServers); + rtn = checkMetaStoreTableLocation(conn, defaultServers) && rtn; + rtn = checkMetaStorePartitionLocation(conn, defaultServers) && rtn; + rtn = checkMetaStoreSkewedColumnsLocation(conn, defaultServers) && rtn; + System.out.println((rtn ? "Succeeded" : "Failed") + " in DFS location validation."); + return rtn; + } + + private String getNameOrID(ResultSet res, int nameInx, int idInx) throws SQLException { + String itemName = res.getString(nameInx); + return (itemName == null || itemName.isEmpty()) ? 
"ID: " + res.getString(idInx) : "Name: " + itemName; + } + + private boolean checkMetaStoreDBLocation(Connection conn, URI[] defaultServers) + throws HiveMetaException { + String dbLoc; + boolean isValid = true; + int numOfInvalid = 0; + if (needsQuotedIdentifier) { + dbLoc = "select dbt.\"DB_ID\", dbt.\"NAME\", dbt.\"DB_LOCATION_URI\" from \"DBS\" dbt order by dbt.\"DB_ID\" "; + } else { + dbLoc = "select dbt.DB_ID, dbt.NAME, dbt.DB_LOCATION_URI from DBS dbt order by dbt.DB_ID"; + } + + try(Statement stmt = conn.createStatement(); + ResultSet res = stmt.executeQuery(dbLoc)) { + while (res.next()) { + String locValue = res.getString(3); + String dbName = getNameOrID(res,2,1); + if (!checkLocation("Database " + dbName, locValue, defaultServers)) { + numOfInvalid++; + } + } + } catch (SQLException e) { + throw new HiveMetaException("Failed to get DB Location Info.", e); + } + if (numOfInvalid > 0) { + isValid = false; + } + return isValid; + } + + private boolean checkMetaStoreTableLocation(Connection conn, URI[] defaultServers) + throws HiveMetaException { + String tabLoc, tabIDRange; + boolean isValid = true; + int numOfInvalid = 0; + if (needsQuotedIdentifier) { + tabIDRange = "select max(\"TBL_ID\"), min(\"TBL_ID\") from \"TBLS\" "; + } else { + tabIDRange = "select max(TBL_ID), min(TBL_ID) from TBLS"; + } + + if (needsQuotedIdentifier) { + tabLoc = "select tbl.\"TBL_ID\", tbl.\"TBL_NAME\", sd.\"LOCATION\", dbt.\"DB_ID\", dbt.\"NAME\" from \"TBLS\" tbl inner join " + + "\"SDS\" sd on tbl.\"SD_ID\" = sd.\"SD_ID\" and tbl.\"TBL_TYPE\" != '" + TableType.VIRTUAL_VIEW + + "' and tbl.\"TBL_ID\" >= ? and tbl.\"TBL_ID\"<= ? " + "inner join \"DBS\" dbt on tbl.\"DB_ID\" = dbt.\"DB_ID\" order by tbl.\"TBL_ID\" "; + } else { + tabLoc = "select tbl.TBL_ID, tbl.TBL_NAME, sd.LOCATION, dbt.DB_ID, dbt.NAME from TBLS tbl join SDS sd on tbl.SD_ID = sd.SD_ID and tbl.TBL_TYPE !='" + + TableType.VIRTUAL_VIEW + "' and tbl.TBL_ID >= ? and tbl.TBL_ID <= ? 
inner join DBS dbt on tbl.DB_ID = dbt.DB_ID order by tbl.TBL_ID"; + } + + long maxID = 0, minID = 0; + long rtnSize = 2000; + + try { + Statement stmt = conn.createStatement(); + ResultSet res = stmt.executeQuery(tabIDRange); + if (res.next()) { + maxID = res.getLong(1); + minID = res.getLong(2); + } + res.close(); + stmt.close(); + PreparedStatement pStmt = conn.prepareStatement(tabLoc); + while (minID <= maxID) { + pStmt.setLong(1, minID); + pStmt.setLong(2, minID + rtnSize); + res = pStmt.executeQuery(); + while (res.next()) { + String locValue = res.getString(3); + String entity = "Database " + getNameOrID(res, 5, 4) + + ", Table " + getNameOrID(res,2,1); + if (!checkLocation(entity, locValue, defaultServers)) { + numOfInvalid++; + } + } + res.close(); + minID += rtnSize + 1; + + } + pStmt.close(); + + } catch (SQLException e) { + throw new HiveMetaException("Failed to get Table Location Info.", e); + } + if (numOfInvalid > 0) { + isValid = false; + } + return isValid; + } + + private boolean checkMetaStorePartitionLocation(Connection conn, URI[] defaultServers) + throws HiveMetaException { + String partLoc, partIDRange; + boolean isValid = true; + int numOfInvalid = 0; + if (needsQuotedIdentifier) { + partIDRange = "select max(\"PART_ID\"), min(\"PART_ID\") from \"PARTITIONS\" "; + } else { + partIDRange = "select max(PART_ID), min(PART_ID) from PARTITIONS"; + } + + if (needsQuotedIdentifier) { + partLoc = "select pt.\"PART_ID\", pt.\"PART_NAME\", sd.\"LOCATION\", tbl.\"TBL_ID\", tbl.\"TBL_NAME\",dbt.\"DB_ID\", dbt.\"NAME\" from \"PARTITIONS\" pt " + + "inner join \"SDS\" sd on pt.\"SD_ID\" = sd.\"SD_ID\" and pt.\"PART_ID\" >= ? and pt.\"PART_ID\"<= ? " + + " inner join \"TBLS\" tbl on pt.\"TBL_ID\" = tbl.\"TBL_ID\" inner join " + + "\"DBS\" dbt on tbl.\"DB_ID\" = dbt.\"DB_ID\" order by tbl.\"TBL_ID\" "; + } else { + partLoc = "select pt.PART_ID, pt.PART_NAME, sd.LOCATION, tbl.TBL_ID, tbl.TBL_NAME, dbt.DB_ID, dbt.NAME from PARTITIONS pt " + + "inner join SDS sd on pt.SD_ID = sd.SD_ID and pt.PART_ID >= ? and pt.PART_ID <= ? 
" + + "inner join TBLS tbl on tbl.TBL_ID = pt.TBL_ID inner join DBS dbt on tbl.DB_ID = dbt.DB_ID order by tbl.TBL_ID "; + } + + long maxID = 0, minID = 0; + long rtnSize = 2000; + + try { + Statement stmt = conn.createStatement(); + ResultSet res = stmt.executeQuery(partIDRange); + if (res.next()) { + maxID = res.getLong(1); + minID = res.getLong(2); + } + res.close(); + stmt.close(); + PreparedStatement pStmt = conn.prepareStatement(partLoc); + while (minID <= maxID) { + pStmt.setLong(1, minID); + pStmt.setLong(2, minID + rtnSize); + res = pStmt.executeQuery(); + while (res.next()) { + String locValue = res.getString(3); + String entity = "Database " + getNameOrID(res,7,6) + + ", Table " + getNameOrID(res,5,4) + + ", Partition " + getNameOrID(res,2,1); + if (!checkLocation(entity, locValue, defaultServers)) { + numOfInvalid++; + } + } + res.close(); + minID += rtnSize + 1; + } + pStmt.close(); + } catch (SQLException e) { + throw new HiveMetaException("Failed to get Partition Location Info.", e); + } + if (numOfInvalid > 0) { + isValid = false; + } + return isValid; + } + + private boolean checkMetaStoreSkewedColumnsLocation(Connection conn, URI[] defaultServers) + throws HiveMetaException { + String skewedColLoc, skewedColIDRange; + boolean isValid = true; + int numOfInvalid = 0; + if (needsQuotedIdentifier) { + skewedColIDRange = "select max(\"STRING_LIST_ID_KID\"), min(\"STRING_LIST_ID_KID\") from \"SKEWED_COL_VALUE_LOC_MAP\" "; + } else { + skewedColIDRange = "select max(STRING_LIST_ID_KID), min(STRING_LIST_ID_KID) from SKEWED_COL_VALUE_LOC_MAP"; + } + + if (needsQuotedIdentifier) { + skewedColLoc = "select t.\"TBL_NAME\", t.\"TBL_ID\", sk.\"STRING_LIST_ID_KID\", sk.\"LOCATION\", db.\"NAME\", db.\"DB_ID\" " + + " from \"TBLS\" t, \"SDS\" s, \"DBS\" db, \"SKEWED_COL_VALUE_LOC_MAP\" sk " + + "where sk.\"SD_ID\" = s.\"SD_ID\" and s.\"SD_ID\" = t.\"SD_ID\" and t.\"DB_ID\" = db.\"DB_ID\" and " + + "sk.\"STRING_LIST_ID_KID\" >= ? and sk.\"STRING_LIST_ID_KID\" <= ? order by t.\"TBL_ID\" "; + } else { + skewedColLoc = "select t.TBL_NAME, t.TBL_ID, sk.STRING_LIST_ID_KID, sk.LOCATION, db.NAME, db.DB_ID from TBLS t, SDS s, DBS db, SKEWED_COL_VALUE_LOC_MAP sk " + + "where sk.SD_ID = s.SD_ID and s.SD_ID = t.SD_ID and t.DB_ID = db.DB_ID and sk.STRING_LIST_ID_KID >= ? and sk.STRING_LIST_ID_KID <= ? 
order by t.TBL_ID "; + } + + long maxID = 0, minID = 0; + long rtnSize = 2000; + + try { + Statement stmt = conn.createStatement(); + ResultSet res = stmt.executeQuery(skewedColIDRange); + if (res.next()) { + maxID = res.getLong(1); + minID = res.getLong(2); + } + res.close(); + stmt.close(); + PreparedStatement pStmt = conn.prepareStatement(skewedColLoc); + while (minID <= maxID) { + pStmt.setLong(1, minID); + pStmt.setLong(2, minID + rtnSize); + res = pStmt.executeQuery(); + while (res.next()) { + String locValue = res.getString(4); + String entity = "Database " + getNameOrID(res,5,6) + + ", Table " + getNameOrID(res,1,2) + + ", String list " + res.getString(3); + if (!checkLocation(entity, locValue, defaultServers)) { + numOfInvalid++; + } + } + res.close(); + minID += rtnSize + 1; + } + pStmt.close(); + } catch (SQLException e) { + throw new HiveMetaException("Failed to get skewed columns location info.", e); + } + if (numOfInvalid > 0) { + isValid = false; + } + return isValid; + } + + /** + * Check if the location is valid for the given entity + * @param entity the entity representing a database, partition or table + * @param entityLocation the location + * @param defaultServers a list of the servers that the location needs to match. + * The location host needs to match one of the given servers. + * If empty, no check against such a list is performed. + * @return true if the location is valid + */ + private boolean checkLocation( + String entity, + String entityLocation, + URI[] defaultServers) { + boolean isValid = true; + if (entityLocation == null) { + logAndPrintToError(entity + ", Error: empty location"); + isValid = false; + } else { + try { + URI currentUri = new Path(entityLocation).toUri(); + String scheme = currentUri.getScheme(); + String path = currentUri.getPath(); + if (StringUtils.isEmpty(scheme)) { + logAndPrintToError(entity + ", Location: " + entityLocation + ", Error: missing location scheme."); + isValid = false; + } else if (StringUtils.isEmpty(path)) { + logAndPrintToError(entity + ", Location: " + entityLocation + ", Error: missing location path."); + isValid = false; + } else if (ArrayUtils.isNotEmpty(defaultServers) && currentUri.getAuthority() != null) { + String authority = currentUri.getAuthority(); + boolean matchServer = false; + for (URI server : defaultServers) { + if (StringUtils.equalsIgnoreCase(server.getScheme(), scheme) && + StringUtils.equalsIgnoreCase(server.getAuthority(), authority)) { + matchServer = true; + break; + } + } + if (!matchServer) { + logAndPrintToError(entity + ", Location: " + entityLocation + ", Error: mismatched server."); + isValid = false; + } + } + + // if there is no path element other than "/", report it but do not fail + if (isValid && StringUtils.containsOnly(path, "/")) { + logAndPrintToError(entity + ", Location: " + entityLocation + ", Warn: location set to root, not a recommended config."); + } + } catch (Exception pe) { + logAndPrintToError(entity + ", Error: invalid location - " + pe.getMessage()); + isValid = false; + } + } + + return isValid; + } + + // test the connection to the metastore using the configured properties + private void testConnectionToMetastore() throws HiveMetaException { + Connection conn = getConnectionToMetastore(true); + try { + conn.close(); + } catch (SQLException e) { + throw new HiveMetaException("Failed to close metastore connection", e); + } + } + + + /** + * Check if the current schema version in the metastore matches the Hive version + */ + @VisibleForTesting + void verifySchemaVersion() throws HiveMetaException { 
// don't check version if it's a dry run + if (dryRun) { + return; + } + String newSchemaVersion = metaStoreSchemaInfo.getMetaStoreSchemaVersion(getConnectionInfo(false)); + // verify that the new version was recorded in the schema + assertCompatibleVersion(metaStoreSchemaInfo.getHiveSchemaVersion(), newSchemaVersion); + } + + private void assertCompatibleVersion(String hiveSchemaVersion, String dbSchemaVersion) + throws HiveMetaException { + if (!metaStoreSchemaInfo.isVersionCompatible(hiveSchemaVersion, dbSchemaVersion)) { + throw new HiveMetaException("Metastore schema version is not compatible. Hive Version: " + + hiveSchemaVersion + ", Database Schema Version: " + dbSchemaVersion); + } + } + + /** + * Perform metastore schema upgrade. Extracts the current schema version from the metastore. + */ + void doUpgrade() throws HiveMetaException { + String fromVersion = + metaStoreSchemaInfo.getMetaStoreSchemaVersion(getConnectionInfo(false)); + if (fromVersion == null || fromVersion.isEmpty()) { + throw new HiveMetaException("Schema version not stored in the metastore. " + + "Metastore schema is too old or corrupt. Try specifying the version manually"); + } + doUpgrade(fromVersion); + } + + private MetaStoreConnectionInfo getConnectionInfo(boolean printInfo) { + return new MetaStoreConnectionInfo(userName, passWord, url, driver, printInfo, conf, + dbType); + } + /** + * Perform metastore schema upgrade + * + * @param fromSchemaVer + * Existing version of the metastore. If null, then read from the metastore + */ + void doUpgrade(String fromSchemaVer) throws HiveMetaException { + if (metaStoreSchemaInfo.getHiveSchemaVersion().equals(fromSchemaVer)) { + System.out.println("No schema upgrade required from version " + fromSchemaVer); + return; + } + // Find the list of scripts to execute for this upgrade + List upgradeScripts = + metaStoreSchemaInfo.getUpgradeScripts(fromSchemaVer); + testConnectionToMetastore(); + System.out.println("Starting upgrade of metastore schema from version " + + fromSchemaVer + " to " + metaStoreSchemaInfo.getHiveSchemaVersion()); + String scriptDir = metaStoreSchemaInfo.getMetaStoreScriptDir(); + try { + for (String scriptFile : upgradeScripts) { + System.out.println("Upgrade script " + scriptFile); + if (!dryRun) { + runPreUpgrade(scriptDir, scriptFile); + runSqlLine(scriptDir, scriptFile); + System.out.println("Completed " + scriptFile); + } + } + } catch (IOException eIO) { + throw new HiveMetaException( + "Upgrade FAILED! 
Metastore state would be inconsistent !!", eIO); + } + + // Revalidate the new version after the upgrade + verifySchemaVersion(); + } + + /** + * Initialize the metastore schema to the current version + * + */ + void doInit() throws HiveMetaException { + doInit(metaStoreSchemaInfo.getHiveSchemaVersion()); + + // Revalidate the new version after initialization + verifySchemaVersion(); + } + + /** + * Initialize the metastore schema + * + * @param toVersion + * Version to initialize the schema to; the no-argument overload passes the current Hive schema version + */ + void doInit(String toVersion) throws HiveMetaException { + testConnectionToMetastore(); + System.out.println("Starting metastore schema initialization to " + toVersion); + + String initScriptDir = metaStoreSchemaInfo.getMetaStoreScriptDir(); + String initScriptFile = metaStoreSchemaInfo.generateInitFileName(toVersion); + + try { + System.out.println("Initialization script " + initScriptFile); + if (!dryRun) { + runSqlLine(initScriptDir, initScriptFile); + System.out.println("Initialization script completed"); + } + } catch (IOException e) { + throw new HiveMetaException("Schema initialization FAILED!" + + " Metastore state would be inconsistent !!", e); + } + } + + private void doCreateUser() throws HiveMetaException { + testConnectionToMetastore(); + System.out.println("Starting user creation"); + + String scriptDir = metaStoreSchemaInfo.getMetaStoreScriptDir(); + String protoCreateFile = metaStoreSchemaInfo.getCreateUserScript(); + + try { + File createFile = subUserAndPassword(scriptDir, protoCreateFile); + System.out.println("Creation script " + createFile.getAbsolutePath()); + if (!dryRun) { + if ("oracle".equals(dbType)) oracleCreateUserHack(createFile); + else runSqlLine(createFile.getParent(), createFile.getName()); + System.out.println("User creation completed"); + } + } catch (IOException e) { + throw new HiveMetaException("User creation FAILED!" 
+ + " Metastore unusable !!", e); + } + } + + private File subUserAndPassword(String parent, String filename) throws IOException { + File createFile = File.createTempFile("create-hive-user-" + dbType, ".sql"); + BufferedWriter writer = new BufferedWriter(new FileWriter(createFile)); + File proto = new File(parent, filename); + BufferedReader reader = new BufferedReader(new FileReader(proto)); + reader.lines() + .map(s -> s.replace("_REPLACE_WITH_USER_", hiveUser) + .replace("_REPLACE_WITH_PASSWD_", hivePasswd) + .replace("_REPLACE_WITH_DB_", hiveDb)) + .forEach(s -> { + try { + writer.write(s); + writer.newLine(); + } catch (IOException e) { + throw new RuntimeException("Unable to write to tmp file ", e); + } + }); + reader.close(); + writer.close(); + return createFile; + } + + private void oracleCreateUserHack(File createFile) throws HiveMetaException { + LOG.debug("Found oracle, hacking our way through it rather than using SqlLine"); + try (BufferedReader reader = new BufferedReader(new FileReader(createFile))) { + try (Connection conn = getConnectionToMetastore(false)) { + try (Statement stmt = conn.createStatement()) { + reader.lines() + .forEach(s -> { + assert s.charAt(s.length() - 1) == ';'; + try { + // strip the trailing ';' before executing the statement directly + stmt.execute(s.substring(0, s.length() - 1)); + } catch (SQLException e) { + LOG.error("statement <" + s.substring(0, s.length() - 1) + "> failed", e); + throw new RuntimeException(e); + } + }); + } + } + } catch (IOException e) { + LOG.error("Caught IOException trying to read modified create user script " + + createFile.getAbsolutePath(), e); + throw new HiveMetaException(e); + } catch (HiveMetaException e) { + LOG.error("Failed to connect to RDBMS", e); + throw e; + } catch (SQLException e) { + LOG.error("Got SQLException", e); + } + } + + private int doValidate() throws HiveMetaException { + System.out.println("Starting metastore validation\n"); + Connection conn = getConnectionToMetastore(false); + boolean success = true; + try { + if (validateSchemaVersions()) { + System.out.println("[SUCCESS]\n"); + } else { + success = false; + System.out.println("[FAIL]\n"); + } + if (validateSequences(conn)) { + System.out.println("[SUCCESS]\n"); + } else { + success = false; + System.out.println("[FAIL]\n"); + } + if (validateSchemaTables(conn)) { + System.out.println("[SUCCESS]\n"); + } else { + success = false; + System.out.println("[FAIL]\n"); + } + if (validateLocations(conn, this.validationServers)) { + System.out.println("[SUCCESS]\n"); + } else { + System.out.println("[WARN]\n"); + } + if (validateColumnNullValues(conn)) { + System.out.println("[SUCCESS]\n"); + } else { + System.out.println("[WARN]\n"); + } + } finally { + if (conn != null) { + try { + conn.close(); + } catch (SQLException e) { + // Not a lot you can do here. 
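To make the placeholder substitution in subUserAndPassword() above concrete, here is an illustrative before/after; the script line is invented, and only the _REPLACE_WITH_* tokens come from this patch:

    // What subUserAndPassword() does to each line of the create-user script:
    String line = "CREATE USER '_REPLACE_WITH_USER_' IDENTIFIED BY '_REPLACE_WITH_PASSWD_';";
    String cooked = line.replace("_REPLACE_WITH_USER_", "hive")       // hiveUser
                        .replace("_REPLACE_WITH_PASSWD_", "hivepwd")  // hivePasswd
                        .replace("_REPLACE_WITH_DB_", "metastore");   // hiveDb
    // cooked is now: CREATE USER 'hive' IDENTIFIED BY 'hivepwd';
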
+ } + } + } + + System.out.print("Done with metastore validation: "); + if (!success) { + System.out.println("[FAIL]"); + return 1; + } else { + System.out.println("[SUCCESS]"); + return 0; + } + } + + boolean validateSequences(Connection conn) throws HiveMetaException { + Map> seqNameToTable = + new ImmutableMap.Builder>() + .put("MDatabase", Pair.of("DBS", "DB_ID")) + .put("MRole", Pair.of("ROLES", "ROLE_ID")) + .put("MGlobalPrivilege", Pair.of("GLOBAL_PRIVS", "USER_GRANT_ID")) + .put("MTable", Pair.of("TBLS","TBL_ID")) + .put("MStorageDescriptor", Pair.of("SDS", "SD_ID")) + .put("MSerDeInfo", Pair.of("SERDES", "SERDE_ID")) + .put("MColumnDescriptor", Pair.of("CDS", "CD_ID")) + .put("MTablePrivilege", Pair.of("TBL_PRIVS", "TBL_GRANT_ID")) + .put("MTableColumnStatistics", Pair.of("TAB_COL_STATS", "CS_ID")) + .put("MPartition", Pair.of("PARTITIONS", "PART_ID")) + .put("MPartitionColumnStatistics", Pair.of("PART_COL_STATS", "CS_ID")) + .put("MFunction", Pair.of("FUNCS", "FUNC_ID")) + .put("MIndex", Pair.of("IDXS", "INDEX_ID")) + .put("MStringList", Pair.of("SKEWED_STRING_LIST", "STRING_LIST_ID")) + .build(); + + System.out.println("Validating sequence number for SEQUENCE_TABLE"); + + boolean isValid = true; + try { + Statement stmt = conn.createStatement(); + for (String seqName : seqNameToTable.keySet()) { + String tableName = seqNameToTable.get(seqName).getLeft(); + String tableKey = seqNameToTable.get(seqName).getRight(); + String fullSequenceName = "org.apache.hadoop.hive.metastore.model." + seqName; + String seqQuery = needsQuotedIdentifier ? + ("select t.\"NEXT_VAL\" from \"SEQUENCE_TABLE\" t WHERE t.\"SEQUENCE_NAME\"=? order by t.\"SEQUENCE_NAME\" ") + : ("select t.NEXT_VAL from SEQUENCE_TABLE t WHERE t.SEQUENCE_NAME=? order by t.SEQUENCE_NAME "); + String maxIdQuery = needsQuotedIdentifier ? + ("select max(\"" + tableKey + "\") from \"" + tableName + "\"") + : ("select max(" + tableKey + ") from " + tableName); + + ResultSet res = stmt.executeQuery(maxIdQuery); + if (res.next()) { + long maxId = res.getLong(1); + if (maxId > 0) { + PreparedStatement pStmt = conn.prepareStatement(seqQuery); + pStmt.setString(1, fullSequenceName); + ResultSet resSeq = pStmt.executeQuery(); + if (!resSeq.next()) { + isValid = false; + logAndPrintToError("Missing SEQUENCE_NAME " + seqName + " from SEQUENCE_TABLE"); + } else if (resSeq.getLong(1) < maxId) { + isValid = false; + logAndPrintToError("NEXT_VAL for " + seqName + " in SEQUENCE_TABLE < max(" + + tableKey + ") in " + tableName); + } + } + } + } + + System.out.println((isValid ? 
"Succeeded" :"Failed") + " in sequence number validation for SEQUENCE_TABLE."); + return isValid; + } catch(SQLException e) { + throw new HiveMetaException("Failed to validate sequence number for SEQUENCE_TABLE", e); + } + } + + boolean validateSchemaVersions() throws HiveMetaException { + System.out.println("Validating schema version"); + try { + String newSchemaVersion = metaStoreSchemaInfo.getMetaStoreSchemaVersion(getConnectionInfo(false)); + assertCompatibleVersion(metaStoreSchemaInfo.getHiveSchemaVersion(), newSchemaVersion); + } catch (HiveMetaException hme) { + if (hme.getMessage().contains("Metastore schema version is not compatible") + || hme.getMessage().contains("Multiple versions were found in metastore") + || hme.getMessage().contains("Could not find version info in metastore VERSION table")) { + logAndPrintToError(hme.getMessage()); + System.out.println("Failed in schema version validation."); + return false; + } else { + throw hme; + } + } + System.out.println("Succeeded in schema version validation."); + return true; + } + + boolean validateSchemaTables(Connection conn) throws HiveMetaException { + String version; + ResultSet rs = null; + DatabaseMetaData metadata; + List dbTables = new ArrayList<>(); + List schemaTables = new ArrayList<>(); + List subScripts = new ArrayList<>(); + Connection hmsConn; + + System.out.println("Validating metastore schema tables"); + try { + version = metaStoreSchemaInfo.getMetaStoreSchemaVersion(getConnectionInfo(false)); + } catch (HiveMetaException he) { + logAndPrintToError("Failed to determine schema version from Hive Metastore DB. " + he.getMessage()); + System.out.println("Failed in schema table validation."); + LOG.debug("Failed to determine schema version from Hive Metastore DB," + he.getMessage()); + return false; + } + + // re-open the hms connection + hmsConn = getConnectionToMetastore(false); + + LOG.debug("Validating tables in the schema for version " + version); + try { + metadata = conn.getMetaData(); + String[] types = {"TABLE"}; + rs = metadata.getTables(null, hmsConn.getSchema(), "%", types); + String table; + + while (rs.next()) { + table = rs.getString("TABLE_NAME"); + dbTables.add(table.toLowerCase()); + LOG.debug("Found table " + table + " in HMS dbstore"); + } + } catch (SQLException e) { + throw new HiveMetaException("Failed to retrieve schema tables from Hive Metastore DB," + e.getMessage()); + } finally { + if (rs != null) { + try { + rs.close(); + } catch (SQLException e) { + // Not a lot you can do here. + } + } + } + + // parse the schema file to determine the tables that are expected to exist + // we are using oracle schema because it is simpler to parse, no quotes or backticks etc + String baseDir = new File(metaStoreSchemaInfo.getMetaStoreScriptDir()).getParent(); + String schemaFile = new File(metaStoreSchemaInfo.getMetaStoreScriptDir(), + metaStoreSchemaInfo.generateInitFileName(version)).getPath(); + try { + LOG.debug("Parsing schema script " + schemaFile); + subScripts.addAll(findCreateTable(schemaFile, schemaTables)); + while (subScripts.size() > 0) { + schemaFile = baseDir + "/" + dbType + "/" + subScripts.remove(0); + LOG.debug("Parsing subscript " + schemaFile); + subScripts.addAll(findCreateTable(schemaFile, schemaTables)); + } + } catch (Exception e) { + logAndPrintToError("Exception in parsing schema file. 
Cause:" + e.getMessage()); + System.out.println("Failed in schema table validation."); + return false; + } + + LOG.debug("Schema tables:[ " + Arrays.toString(schemaTables.toArray()) + " ]"); + LOG.debug("DB tables:[ " + Arrays.toString(dbTables.toArray()) + " ]"); + // now diff the lists + schemaTables.removeAll(dbTables); + if (schemaTables.size() > 0) { + Collections.sort(schemaTables); + logAndPrintToError("Table(s) [ " + Arrays.toString(schemaTables.toArray()) + + " ] are missing from the metastore database schema."); + System.out.println("Failed in schema table validation."); + return false; + } else { + System.out.println("Succeeded in schema table validation."); + return true; + } + } + + private List findCreateTable(String path, List tableList) + throws Exception { + NestedScriptParser sp = HiveSchemaHelper.getDbCommandParser(dbType, true); + Matcher matcher; + Pattern regexp; + List subs = new ArrayList<>(); + int groupNo = 2; + + regexp = Pattern.compile("CREATE TABLE(\\s+IF NOT EXISTS)?\\s+(\\S+).*"); + + if (!(new File(path)).exists()) { + throw new Exception(path + " does not exist. Potentially incorrect version in the metastore VERSION table"); + } + + try ( + BufferedReader reader = new BufferedReader(new FileReader(path)) + ){ + String line; + while ((line = reader.readLine()) != null) { + if (sp.isNestedScript(line)) { + String subScript; + subScript = sp.getScriptName(line); + LOG.debug("Schema subscript " + subScript + " found"); + subs.add(subScript); + continue; + } + line = line.replaceAll("( )+", " "); //suppress multi-spaces + line = line.replaceAll("\\(", " "); + line = line.replaceAll("IF NOT EXISTS ", ""); + line = line.replaceAll("`",""); + line = line.replaceAll("'",""); + line = line.replaceAll("\"",""); + matcher = regexp.matcher(line); + + if (matcher.find()) { + String table = matcher.group(groupNo); + if (dbType.equals("derby")) + table = table.replaceAll("APP\\.",""); + tableList.add(table.toLowerCase()); + LOG.debug("Found table " + table + " in the schema"); + } + } + } catch (IOException ex){ + throw new Exception(ex.getMessage()); + } + + return subs; + } + + boolean validateColumnNullValues(Connection conn) throws HiveMetaException { + System.out.println("Validating columns for incorrect NULL values."); + boolean isValid = true; + try { + Statement stmt = conn.createStatement(); + String tblQuery = needsQuotedIdentifier ? + ("select t.* from \"TBLS\" t WHERE t.\"SD_ID\" IS NULL and (t.\"TBL_TYPE\"='" + TableType.EXTERNAL_TABLE + "' or t.\"TBL_TYPE\"='" + TableType.MANAGED_TABLE + "') order by t.\"TBL_ID\" ") + : ("select t.* from TBLS t WHERE t.SD_ID IS NULL and (t.TBL_TYPE='" + TableType.EXTERNAL_TABLE + "' or t.TBL_TYPE='" + TableType.MANAGED_TABLE + "') order by t.TBL_ID "); + + ResultSet res = stmt.executeQuery(tblQuery); + while (res.next()) { + long tableId = res.getLong("TBL_ID"); + String tableName = res.getString("TBL_NAME"); + String tableType = res.getString("TBL_TYPE"); + isValid = false; + logAndPrintToError("SD_ID in TBLS should not be NULL for Table Name=" + tableName + ", Table ID=" + tableId + ", Table Type=" + tableType); + } + + System.out.println((isValid ? "Succeeded" : "Failed") + " in column validation for incorrect NULL values."); + return isValid; + } catch(SQLException e) { + throw new HiveMetaException("Failed to validate columns for incorrect NULL values", e); + } + } + + /** + * Run pre-upgrade scripts corresponding to a given upgrade script, + * if any exist. The errors from pre-upgrade are ignored. 
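+ * (Concretely: the loop below asks metaStoreSchemaInfo.getPreUpgradeScriptName(i, scriptFile) + * for candidate names, i = 0, 1, 2, ..., and stops at the first candidate that does not + * exist in the script directory.)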
+ * Pre-upgrade scripts typically contain setup statements which + * may fail on some database versions and failure is ignorable. + * + * @param scriptDir upgrade script directory name + * @param scriptFile upgrade script file name + */ + private void runPreUpgrade(String scriptDir, String scriptFile) { + for (int i = 0;; i++) { + String preUpgradeScript = + metaStoreSchemaInfo.getPreUpgradeScriptName(i, scriptFile); + File preUpgradeScriptFile = new File(scriptDir, preUpgradeScript); + if (!preUpgradeScriptFile.isFile()) { + break; + } + + try { + runSqlLine(scriptDir, preUpgradeScript); + System.out.println("Completed " + preUpgradeScript); + } catch (Exception e) { + // Ignore the pre-upgrade script errors + logAndPrintToError("Warning in pre-upgrade script " + preUpgradeScript + ": " + + e.getMessage()); + if (verbose) { + e.printStackTrace(); + } + } + } + } + + /*** + * Run SqlLine with the given metastore script. Nested scripts are no longer + * flattened into a single file here; they must already be unrolled. + */ + private void runSqlLine(String scriptDir, String scriptFile) + throws IOException, HiveMetaException { + + // This no longer does expansions of run commands in the files as it used to. Instead it + // depends on the developers to have already unrolled those in the files. + runSqlLine(scriptDir + File.separatorChar + scriptFile); + } + + // Generate the SqlLine args from the metastore config and execute the given script + void runSqlLine(String sqlScriptFile) throws IOException { + CommandBuilder builder = new CommandBuilder(conf, url, driver, + userName, passWord, sqlScriptFile); + + // run the script using SqlLine + SqlLine sqlLine = new SqlLine(); + ByteArrayOutputStream outputForLog = null; + if (!verbose) { + OutputStream out; + if (LOG.isDebugEnabled()) { + out = outputForLog = new ByteArrayOutputStream(); + } else { + out = new NullOutputStream(); + } + sqlLine.setOutputStream(new PrintStream(out)); + System.setProperty("sqlline.silent", "true"); + } + //sqlLine.getOpts().setAllowMultiLineCommand(false); + //System.setProperty("sqlline.isolation","TRANSACTION_READ_COMMITTED"); + // We can be pretty sure that an entire line can be processed as a single command since + // we always add a line separator at the end while calling dbCommandParser.buildCommand. 
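For reference, a hand-rolled sketch of what the begin() call below receives once CommandBuilder (defined further down) has assembled the argument vector; the URL, driver, credentials and script path are placeholders:

    // Rough equivalent of runSqlLine("/tmp/hive-schema-3.0.0.derby.sql"):
    static void runScriptDirectly() throws java.io.IOException {
      sqlline.SqlLine sqlLine = new sqlline.SqlLine();
      sqlline.SqlLine.Status status = sqlLine.begin(new String[] {
          "-u", "jdbc:derby:;databaseName=metastore_db;create=true",  // placeholder URL
          "-d", "org.apache.derby.jdbc.EmbeddedDriver",               // placeholder driver
          "-n", "APP", "-p", "mine",                                  // placeholder credentials
          "--isolation=TRANSACTION_READ_COMMITTED",
          "-f", "/tmp/hive-schema-3.0.0.derby.sql"                    // placeholder script
      }, null, false);
      if (status != sqlline.SqlLine.Status.OK) {
        throw new java.io.IOException("Schema script failed, errorcode " + status);
      }
    }
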
+ //sqlLine.getOpts().setEntireLineAsCommand(true); + LOG.info("Going to run command <" + builder.buildToLog() + ">"); + SqlLine.Status status = sqlLine.begin(builder.buildToRun(), null, false); + if (LOG.isDebugEnabled() && outputForLog != null) { + LOG.debug("Received following output from Sqlline:"); + LOG.debug(outputForLog.toString("UTF-8")); + } + if (status != SqlLine.Status.OK) { + throw new IOException("Schema script failed, errorcode " + status); + } + } + + static class CommandBuilder { + private final Configuration conf; + private final String userName; + private final String password; + private final String sqlScriptFile; + private final String driver; + private final String url; + + CommandBuilder(Configuration conf, String url, String driver, + String userName, String password, String sqlScriptFile) { + this.conf = conf; + this.userName = userName; + this.password = password; + this.url = url; + this.driver = driver; + this.sqlScriptFile = sqlScriptFile; + } + + String[] buildToRun() throws IOException { + return argsWith(password); + } + + String buildToLog() throws IOException { + logScript(); + return StringUtils.join(argsWith(PASSWD_MASK), " "); + } + + private String[] argsWith(String password) throws IOException { + return new String[] + { + "-u", url == null ? MetastoreConf.getVar(conf, ConfVars.CONNECTURLKEY) : url, + "-d", driver == null ? MetastoreConf.getVar(conf, ConfVars.CONNECTION_DRIVER) : driver, + "-n", userName, + "-p", password, + "--isolation=TRANSACTION_READ_COMMITTED", + "-f", sqlScriptFile + }; + } + + private void logScript() throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Going to invoke file that contains:"); + try (BufferedReader reader = new BufferedReader(new FileReader(sqlScriptFile))) { + String line; + while ((line = reader.readLine()) != null) { + LOG.debug("script: " + line); + } + } + } + } + } + + // Create the required command line options + @SuppressWarnings("static-access") + private static void initOptions(Options cmdLineOptions) { + Option help = new Option("help", "print this message"); + Option upgradeOpt = new Option("upgradeSchema", "Schema upgrade"); + Option upgradeFromOpt = OptionBuilder.withArgName("upgradeFrom").hasArg(). + withDescription("Schema upgrade from a version"). + create("upgradeSchemaFrom"); + Option initOpt = new Option("initSchema", "Schema initialization"); + Option initToOpt = OptionBuilder.withArgName("initTo").hasArg(). + withDescription("Schema initialization to a version"). + create("initSchemaTo"); + Option infoOpt = new Option("info", "Show config and schema details"); + Option validateOpt = new Option("validate", "Validate the database"); + Option createUserOpt = new Option("createUser", "Create the Hive user, use admin user and " + + "password with this"); + + OptionGroup optGroup = new OptionGroup(); + optGroup.addOption(upgradeOpt).addOption(initOpt). 
+ addOption(help).addOption(upgradeFromOpt).addOption(createUserOpt) + .addOption(initToOpt).addOption(infoOpt).addOption(validateOpt); + optGroup.setRequired(true); + + Option userNameOpt = OptionBuilder.withArgName("user") + .hasArgs() + .withDescription("Override config file user name") + .create("userName"); + Option passwdOpt = OptionBuilder.withArgName("password") + .hasArgs() + .withDescription("Override config file password") + .create("passWord"); + Option hiveUserOpt = OptionBuilder + .hasArg() + .withDescription("Hive user (for use with createUser)") + .create("hiveUser"); + Option hivePasswdOpt = OptionBuilder + .hasArg() + .withDescription("Hive password (for use with createUser)") + .create("hivePassword"); + Option hiveDbOpt = OptionBuilder + .hasArg() + .withDescription("Hive database (for use with createUser)") + .create("hiveDb"); + Option dbTypeOpt = OptionBuilder.withArgName("databaseType") + .hasArgs().withDescription("Metastore database type") + .create("dbType"); + Option urlOpt = OptionBuilder.withArgName("url") + .hasArgs().withDescription("connection url to the database") + .create("url"); + Option driverOpt = OptionBuilder.withArgName("driver") + .hasArgs().withDescription("driver name for connection") + .create("driver"); + Option dbOpts = OptionBuilder.withArgName("databaseOpts") + .hasArgs().withDescription("Backend DB specific options") + .create("dbOpts"); + Option dryRunOpt = new Option("dryRun", "list SQL scripts (no execute)"); + Option verboseOpt = new Option("verbose", "only print SQL statements"); + Option serversOpt = OptionBuilder.withArgName("serverList") + .hasArgs().withDescription("a comma-separated list of servers used in location validation in the format of scheme://authority (e.g. hdfs://localhost:8000)") + .create("servers"); + cmdLineOptions.addOption(help); + cmdLineOptions.addOption(dryRunOpt); + cmdLineOptions.addOption(userNameOpt); + cmdLineOptions.addOption(passwdOpt); + cmdLineOptions.addOption(dbTypeOpt); + cmdLineOptions.addOption(verboseOpt); + cmdLineOptions.addOption(urlOpt); + cmdLineOptions.addOption(driverOpt); + cmdLineOptions.addOption(dbOpts); + cmdLineOptions.addOption(serversOpt); + cmdLineOptions.addOption(hiveUserOpt); + cmdLineOptions.addOption(hivePasswdOpt); + cmdLineOptions.addOption(hiveDbOpt); + cmdLineOptions.addOptionGroup(optGroup); + } + + private static void logAndPrintToError(String errmsg) { + LOG.error(errmsg); + System.err.println(errmsg); + } + + public static void main(String[] args) { + System.exit(run(args)); + } + + public static int run(String[] args) { + LOG.debug("Going to run command: " + StringUtils.join(args, " ")); + CommandLineParser parser = new GnuParser(); + CommandLine line; + String dbType; + String schemaVer; + Options cmdLineOptions = new Options(); + + // Argument handling + initOptions(cmdLineOptions); + try { + line = parser.parse(cmdLineOptions, args); + } catch (ParseException e) { + logAndPrintToError("HiveSchemaTool:Parsing failed. 
Reason: " + e.getLocalizedMessage()); + return usage(cmdLineOptions); + } + + assert line != null; + if (line.hasOption("help")) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("schemaTool", cmdLineOptions); + return 1; + } + + if (line.hasOption("dbType")) { + dbType = line.getOptionValue("dbType"); + if ((!dbType.equalsIgnoreCase(HiveSchemaHelper.DB_DERBY) && + !dbType.equalsIgnoreCase(HiveSchemaHelper.DB_MSSQL) && + !dbType.equalsIgnoreCase(HiveSchemaHelper.DB_MYSQL) && + !dbType.equalsIgnoreCase(HiveSchemaHelper.DB_POSTGRACE) && !dbType + .equalsIgnoreCase(HiveSchemaHelper.DB_ORACLE))) { + logAndPrintToError("Unsupported dbType " + dbType); + return usage(cmdLineOptions); + } + } else { + logAndPrintToError("no dbType supplied"); + return usage(cmdLineOptions); + } + + System.setProperty(ConfVars.SCHEMA_VERIFICATION.toString(), "true"); + try { + MetastoreSchemaTool schemaTool = new MetastoreSchemaTool(dbType); + + if (line.hasOption("userName")) { + schemaTool.setUserName(line.getOptionValue("userName")); + } else { + schemaTool.setUserName(MetastoreConf.getVar(schemaTool.getConf(), ConfVars.CONNECTION_USER_NAME)); + } + if (line.hasOption("passWord")) { + schemaTool.setPassWord(line.getOptionValue("passWord")); + } else { + try { + schemaTool.setPassWord(MetastoreConf.getPassword(schemaTool.getConf(), ConfVars.PWD)); + } catch (IOException err) { + throw new HiveMetaException("Error getting metastore password", err); + } + } + if (line.hasOption("hiveUser")) { + schemaTool.setHiveUser(line.getOptionValue("hiveUser")); + } + if (line.hasOption("hivePassword")) { + schemaTool.setHivePasswd(line.getOptionValue("hivePassword")); + } + if (line.hasOption("hiveDb")) { + schemaTool.setHiveDb(line.getOptionValue("hiveDb")); + } + if (line.hasOption("url")) { + schemaTool.setUrl(line.getOptionValue("url")); + } + if (line.hasOption("driver")) { + schemaTool.setDriver(line.getOptionValue("driver")); + } + if (line.hasOption("dryRun")) { + schemaTool.setDryRun(true); + } + if (line.hasOption("verbose")) { + schemaTool.setVerbose(true); + } + if (line.hasOption("dbOpts")) { + schemaTool.setDbOpts(line.getOptionValue("dbOpts")); + } + if (line.hasOption("validate") && line.hasOption("servers")) { + schemaTool.setValidationServers(line.getOptionValue("servers")); + } + if (line.hasOption("info")) { + schemaTool.showInfo(); + } else if (line.hasOption("upgradeSchema")) { + schemaTool.doUpgrade(); + } else if (line.hasOption("upgradeSchemaFrom")) { + schemaVer = line.getOptionValue("upgradeSchemaFrom"); + schemaTool.doUpgrade(schemaVer); + } else if (line.hasOption("initSchema")) { + schemaTool.doInit(); + } else if (line.hasOption("initSchemaTo")) { + schemaVer = line.getOptionValue("initSchemaTo"); + schemaTool.doInit(schemaVer); + } else if (line.hasOption("validate")) { + return schemaTool.doValidate(); + } else if (line.hasOption("createUser")) { + schemaTool.doCreateUser(); + } else { + logAndPrintToError("no valid option supplied"); + return usage(cmdLineOptions); + } + } catch (HiveMetaException e) { + logAndPrintToError(e.getMessage()); + if (e.getCause() != null) { + Throwable t = e.getCause(); + logAndPrintToError("Underlying cause: " + + t.getClass().getName() + " : " + + t.getMessage()); + if (e.getCause() instanceof SQLException) { + logAndPrintToError("SQL Error code: " + ((SQLException)t).getErrorCode()); + } + } + if (line.hasOption("verbose")) { + e.printStackTrace(); + } else { + logAndPrintToError("Use --verbose for detailed stacktrace."); + } + 
logAndPrintToError("*** schemaTool failed ***"); + return 1; + } + System.out.println("schemaTool completed"); + return 0; + + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java new file mode 100644 index 0000000000..7f4d9b0374 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.ColumnType;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.thrift.TException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.util.List;
+
+/**
+ * This class runs a few simple operations against the server to make sure it runs reasonably.
+ * Even though it is a test, it's in the main tree because it needs to be deployed with the server
+ * to allow smoke testing once the server is installed.
+ */
+public class SmokeTest {
+ private static final Logger LOG = LoggerFactory.getLogger(SmokeTest.class);
+ private static final String dbName = "internal_smoke_test";
+ private static final String tableName = "internal_smoke_test_table";
+ private static final String partValue = "internal_smoke_test_val1";
+
+ private SmokeTest() {
+
+ }
+
+ private void runTest(IMetaStoreClient client) throws TException {
+ LOG.info("Starting smoke test");
+
+ File dbDir = new File(System.getProperty("java.io.tmpdir"), "internal_smoke_test");
+ if (!dbDir.mkdir()) {
+ throw new RuntimeException("Unable to create directory " + dbDir.getAbsolutePath());
+ }
+ dbDir.deleteOnExit();
+
+ LOG.info("Going to create database " + dbName);
+ Database db = new DatabaseBuilder()
+ .setName(dbName)
+ .setLocation(dbDir.getAbsolutePath())
+ .build();
+ client.createDatabase(db);
+
+ LOG.info("Going to create table " + tableName);
+ Table table = new TableBuilder()
+ .setDbName(db)
+ .setTableName(tableName)
+ .addCol("col1", ColumnType.INT_TYPE_NAME)
+ .addCol("col2", ColumnType.TIMESTAMP_TYPE_NAME)
+ .addPartCol("pcol1", ColumnType.STRING_TYPE_NAME)
+ .build();
+ client.createTable(table);
+
+ LOG.info("Going to create partition with value " + partValue);
+ Partition part = new PartitionBuilder()
+ .fromTable(table)
+ .addValue(partValue)
+ .build();
+ client.add_partition(part);
+
+ LOG.info("Going to list the partitions");
+ List<Partition> parts = client.listPartitions(dbName, tableName, (short)-1);
+ LOG.info("Fetched: { " + parts.toString() + "}");
+
+ LOG.info("Going to drop database");
+ client.dropDatabase(dbName, true, false, true);
+
+
+ LOG.info("Completed smoke test");
+ }
+
+ public static void main(String[] args) throws Exception {
+ SmokeTest test = new SmokeTest();
+ Configuration conf = MetastoreConf.newMetastoreConf();
+ IMetaStoreClient client = new HiveMetaStoreClient(conf);
+ test.runTest(client);
+ }
+} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/LogUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/LogUtils.java
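(A usage sketch with assumed paths: once the tarball is installed, the smoke test above can be launched through the service dispatch added later in this patch, e.g. "base --service smokeTest", which requires a running metastore reachable at the configured metastore.thrift.uris.)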
index 06fe6cb18f..26daeaed56 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/LogUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/LogUtils.java @@ -36,7 +36,7 @@ */ public class LogUtils {
- private static final String HIVE_L4J = "hive-log4j2.properties";
+ private static final String HIVE_L4J = "metastore-log4j2.properties";
 private static final Logger l4j = LoggerFactory.getLogger(LogUtils.class); @SuppressWarnings("serial") diff --git standalone-metastore/src/main/resources/metastore-log4j2.properties standalone-metastore/src/main/resources/metastore-log4j2.properties new file mode 100644 index 0000000000..ec5039b4fb --- /dev/null +++ standalone-metastore/src/main/resources/metastore-log4j2.properties @@ -0,0 +1,71 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+status = INFO
+name = MetastoreLog4j2
+packages = org.apache.hadoop.hive.metastore
+
+# list of properties
+property.metastore.log.level = INFO
+property.metastore.root.logger = DRFA
+property.metastore.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
+property.metastore.log.file = metastore.log
+property.hive.perflogger.log.level = INFO
+
+# list of all appenders
+appenders = console, DRFA
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
+
+# daily rolling file appender
+appender.DRFA.type = RollingRandomAccessFile
+appender.DRFA.name = DRFA
+appender.DRFA.fileName = ${sys:metastore.log.dir}/${sys:metastore.log.file}
+# Use %pid in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI sessions
+appender.DRFA.filePattern = ${sys:metastore.log.dir}/${sys:metastore.log.file}.%d{yyyy-MM-dd}
+appender.DRFA.layout.type = PatternLayout
+appender.DRFA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
+appender.DRFA.policies.type = Policies
+appender.DRFA.policies.time.type = TimeBasedTriggeringPolicy
+appender.DRFA.policies.time.interval = 1
+appender.DRFA.policies.time.modulate = true
+appender.DRFA.strategy.type = DefaultRolloverStrategy
+appender.DRFA.strategy.max = 30
+
+# list of all loggers
+loggers = DataNucleus, Datastore, JPOX, PerfLogger
+
+logger.DataNucleus.name = DataNucleus
+logger.DataNucleus.level = ERROR
+
+logger.Datastore.name = Datastore
+logger.Datastore.level = ERROR
+
+logger.JPOX.name = JPOX
+logger.JPOX.level = ERROR
+
+logger.PerfLogger.name = org.apache.hadoop.hive.ql.log.PerfLogger
+logger.PerfLogger.level = ${sys:hive.perflogger.log.level}
+
+# root logger
+rootLogger.level = ${sys:metastore.log.level}
+rootLogger.appenderRefs = root
+rootLogger.appenderRef.root.ref = 
${sys:metastore.root.logger} diff --git standalone-metastore/src/main/resources/metastore-site.xml standalone-metastore/src/main/resources/metastore-site.xml new file mode 100644 index 0000000000..271fc59379 --- /dev/null +++ standalone-metastore/src/main/resources/metastore-site.xml @@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<configuration>
+ <property>
+ <name>metastore.thrift.uris</name>
+ <value>thrift://localhost:9083</value>
+ <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
+ </property>
+ <property>
+ <name>metastore.task.threads.always</name>
+ <value>org.apache.hadoop.hive.metastore.events.EventCleanerTask</value>
+ </property>
+ <property>
+ <name>metastore.expression.proxy</name>
+ <value>org.apache.hadoop.hive.metastore.DefaultPartitionExpressionProxy</value>
+ </property>
+</configuration> \ No newline at end of file diff --git standalone-metastore/src/main/scripts/base standalone-metastore/src/main/scripts/base new file mode 100755 index 0000000000..2e37383242 --- /dev/null +++ standalone-metastore/src/main/scripts/base @@ -0,0 +1,231 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cygwin=false
+case "`uname`" in
+ CYGWIN*) cygwin=true;;
+esac
+
+bin=`dirname "$0"`
+# The strange redirect on cd is because on some systems cd outputs the directory you changed to
+bin=`cd "$bin" 2>&1 > /dev/null; pwd`
+
+. "$bin"/metastore-config.sh
+
+SERVICE=""
+HELP=""
+SKIP_HADOOPVERSION=false
+
+SERVICE_ARGS=()
+while [ $# -gt 0 ]; do
+ case "$1" in
+ --version)
+ shift
+ SERVICE=version
+ ;;
+ --service)
+ shift
+ SERVICE=$1
+ shift
+ ;;
+ --skiphadoopversion)
+ SKIP_HADOOPVERSION=true
+ shift
+ ;;
+ --help)
+ HELP=_help
+ shift
+ ;;
+ --debug*)
+ DEBUG=$1
+ shift
+ ;;
+ *)
+ SERVICE_ARGS=("${SERVICE_ARGS[@]}" "$1")
+ shift
+ ;;
+ esac
+done
+
+if [ "$SERVICE" = "" ] ; then
+ if [ "$HELP" = "_help" ] ; then
+ SERVICE="help"
+ else
+ SERVICE="cli"
+ fi
+fi
+
+if [[ "$SERVICE" =~ ^(help|schemaTool)$ ]] ; then
+ SKIP_HADOOPVERSION=true
+fi
+
+if [ -f "${METASTORE_CONF_DIR}/metastore-env.sh" ]; then
+ . "${METASTORE_CONF_DIR}/metastore-env.sh"
+fi
+
+CLASSPATH="${METASTORE_CONF_DIR}"
+
+METASTORE_LIB=${METASTORE_HOME}/lib
+
+# needed for execution
+if [ ! -f ${METASTORE_LIB}/hive-standalone-metastore-*.jar ]; then
+ echo "Missing Standalone MetaStore Jar"
+ exit 2;
+fi
+
+for f in ${METASTORE_LIB}/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add the auxiliary jars such as serdes
+if [ -d "${METASTORE_AUX_JARS_PATH}" ]; then
+ hive_aux_jars_abspath=`cd ${METASTORE_AUX_JARS_PATH} 2>&1 > /dev/null && pwd`
+ for f in $hive_aux_jars_abspath/*.jar; do
+ if [[ !
-f $f ]]; then
+ continue;
+ fi
+ if $cygwin; then
+ f=`cygpath -w "$f"`
+ fi
+ AUX_CLASSPATH=${AUX_CLASSPATH}:$f
+ if [ "${AUX_PARAM}" == "" ]; then
+ AUX_PARAM=file://$f
+ else
+ AUX_PARAM=${AUX_PARAM},file://$f;
+ fi
+ done
+elif [ "${METASTORE_AUX_JARS_PATH}" != "" ]; then
+ METASTORE_AUX_JARS_PATH=`echo $METASTORE_AUX_JARS_PATH | sed 's/,/:/g'`
+ if $cygwin; then
+ METASTORE_AUX_JARS_PATH=`cygpath -p -w "$METASTORE_AUX_JARS_PATH"`
+ METASTORE_AUX_JARS_PATH=`echo $METASTORE_AUX_JARS_PATH | sed 's/;/,/g'`
+ fi
+ AUX_CLASSPATH=${AUX_CLASSPATH}:${METASTORE_AUX_JARS_PATH}
+ AUX_PARAM="file://$(echo ${METASTORE_AUX_JARS_PATH} | sed 's/:/,file:\/\//g')"
+fi
+
+if $cygwin; then
+ CLASSPATH=`cygpath -p -w "$CLASSPATH"`
+ CLASSPATH=${CLASSPATH};${AUX_CLASSPATH}
+else
+ CLASSPATH=${CLASSPATH}:${AUX_CLASSPATH}
+fi
+
+# suppress the HADOOP_HOME warnings in 1.x.x
+export HADOOP_HOME_WARN_SUPPRESS=true
+
+# to make sure log4j2.x and jline jars are loaded ahead of the jars pulled by hadoop
+export HADOOP_USER_CLASSPATH_FIRST=true
+
+# pass classpath to hadoop
+if [ "$HADOOP_CLASSPATH" != "" ]; then
+ export HADOOP_CLASSPATH="${CLASSPATH}:${HADOOP_CLASSPATH}"
+else
+ export HADOOP_CLASSPATH="$CLASSPATH"
+fi
+
+# also pass the metastore classpath to hadoop
+if [ "$METASTORE_CLASSPATH" != "" ]; then
+ export HADOOP_CLASSPATH="${HADOOP_CLASSPATH}:${METASTORE_CLASSPATH}";
+fi
+
+# check for hadoop in the path
+HADOOP_IN_PATH=`which hadoop 2>/dev/null`
+if [ -f ${HADOOP_IN_PATH} ]; then
+ HADOOP_DIR=`dirname "$HADOOP_IN_PATH"`/..
+fi
+# HADOOP_HOME env variable overrides hadoop in the path
+HADOOP_HOME=${HADOOP_HOME:-${HADOOP_PREFIX:-$HADOOP_DIR}}
+if [ "$HADOOP_HOME" == "" ]; then
+ echo "Cannot find hadoop installation: \$HADOOP_HOME or \$HADOOP_PREFIX must be set or hadoop must be in the path";
+ exit 4;
+fi
+
+HADOOP=$HADOOP_HOME/bin/hadoop
+if [ ! -f ${HADOOP} ]; then
+ echo "Cannot find hadoop installation: \$HADOOP_HOME or \$HADOOP_PREFIX must be set or hadoop must be in the path";
+ exit 4;
+fi
+
+if [ "$SKIP_HADOOPVERSION" = false ]; then
+ # Make sure we're using a compatible version of Hadoop
+ if [ "x$HADOOP_VERSION" == "x" ]; then
+ HADOOP_VERSION=$($HADOOP version 2>&1 | awk -F"\t" '/Hadoop/ {print $0}' | cut -d' ' -f 2);
+ fi
+
+ # Save the regex to a var to work around quoting incompatibilities
+ # between Bash 3.1 and 3.2
+ hadoop_version_re="^([[:digit:]]+)\.([[:digit:]]+)(\.([[:digit:]]+))?.*$"
+
+ if [[ "$HADOOP_VERSION" =~ $hadoop_version_re ]]; then
+ hadoop_major_ver=${BASH_REMATCH[1]}
+ hadoop_minor_ver=${BASH_REMATCH[2]}
+ hadoop_patch_ver=${BASH_REMATCH[4]}
+ else
+ echo "Unable to determine Hadoop version information."
+ echo "'hadoop version' returned:"
+ echo `$HADOOP version`
+ exit 5
+ fi
+
+ if [ "$hadoop_major_ver" -lt "2" ] || [ "$hadoop_major_ver" -eq "2" -a "$hadoop_minor_ver" -lt "6" ]; then
+ echo "Standalone metastore requires Hadoop 2.6 or later."
+ echo "'hadoop version' returned:"
+ echo `$HADOOP version`
+ exit 6
+ fi
+fi
+
+if [ "${AUX_PARAM}" != "" ]; then
+ METASTORE_OPTS="$METASTORE_OPTS --hiveconf hive.aux.jars.path=${AUX_PARAM}"
+ AUX_JARS_CMD_LINE="-libjars ${AUX_PARAM}"
+fi
+
+SERVICE_LIST=""
+
+for i in "$bin"/ext/*.sh ; do
+ .
$i +done + +if [ "$DEBUG" ]; then + if [ "$HELP" ]; then + debug_help + exit 0 + else + get_debug_params "$DEBUG" + export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS $METASTORE_MAIN_CLIENT_DEBUG_OPTS" + fi +fi + +TORUN="" +for j in $SERVICE_LIST ; do + if [ "$j" = "$SERVICE" ] ; then + TORUN=${j}$HELP + fi +done + +# to initialize logging for all services +export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Dlog4j.configurationFile=metastore-log4j2.properties" + +if [ "$TORUN" = "" ] ; then + echo "Service $SERVICE not found" + echo "Available Services: $SERVICE_LIST" + exit 7 +else + set -- "${SERVICE_ARGS[@]}" + $TORUN "$@" +fi diff --git standalone-metastore/src/main/scripts/ext/metastore.sh standalone-metastore/src/main/scripts/ext/metastore.sh new file mode 100644 index 0000000000..6b12991e43 --- /dev/null +++ standalone-metastore/src/main/scripts/ext/metastore.sh @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +THISSERVICE=metastore +export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} " + +metastore() { + echo "$(timestamp): Starting Metastore Server" + CLASS=org.apache.hadoop.hive.metastore.HiveMetaStore + if $cygwin; then + METASTORE_LIB=`cygpath -w "$METASTORE_LIB"` + fi + JAR=${METASTORE_LIB}/hive-standalone-metastore-*.jar + + # hadoop 20 or newer - skip the aux_jars option and hiveconf + + export HADOOP_CLIENT_OPTS=" -Dproc_metastore $HADOOP_CLIENT_OPTS " + export HADOOP_OPTS="$METASTORE_HADOOP_OPTS $HADOOP_OPTS" + exec $HADOOP jar $JAR $CLASS "$@" +} + +metastore_help() { + metastore -h +} + +timestamp() +{ + date +"%Y-%m-%d %T" +} diff --git standalone-metastore/src/main/scripts/ext/schemaTool.sh standalone-metastore/src/main/scripts/ext/schemaTool.sh new file mode 100644 index 0000000000..bfc8d789da --- /dev/null +++ standalone-metastore/src/main/scripts/ext/schemaTool.sh @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
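+# A sketch of the dispatch convention (restating what the base script expects,
+# with a hypothetical example): each ext/*.sh script appends THISSERVICE to
+# SERVICE_LIST and defines <service>() and <service>_help() functions; base
+# then routes "--service <name>" to the matching function, so e.g.
+#   schematool -dbType derby -initSchema
+# runs as "base --service schemaTool -dbType derby -initSchema".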
+ +THISSERVICE=schemaTool +export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} " + +schemaTool() { + METASTORE_OPTS='' + CLASS=org.apache.hadoop.hive.metastore.tools.MetastoreSchemaTool + if $cygwin; then + METASTORE_LIB=`cygpath -w "$METASTORE_LIB"` + fi + JAR=${METASTORE_LIB}/hive-standalone-metastore-*.jar + + # hadoop 20 or newer - skip the aux_jars option and hiveconf + exec $HADOOP jar $JAR $CLASS "$@" +} + +schemaTool_help () { + schemaTool -h +} diff --git standalone-metastore/src/main/scripts/ext/smokeTest.sh standalone-metastore/src/main/scripts/ext/smokeTest.sh new file mode 100644 index 0000000000..ef000e9b5b --- /dev/null +++ standalone-metastore/src/main/scripts/ext/smokeTest.sh @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +THISSERVICE=smokeTest +export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} " + +smokeTest() { + METASTORE_OPTS='' + CLASS=org.apache.hadoop.hive.metastore.tools.SmokeTest + if $cygwin; then + METASTORE_LIB=`cygpath -w "$METASTORE_LIB"` + fi + JAR=${METASTORE_LIB}/hive-standalone-metastore-*.jar + + # hadoop 20 or newer - skip the aux_jars option and hiveconf + exec $HADOOP jar $JAR $CLASS "$@" +} + +smokeTest_help () { + echo "There is no help, just run it" +} diff --git standalone-metastore/src/main/scripts/metastore-config.sh standalone-metastore/src/main/scripts/metastore-config.sh new file mode 100644 index 0000000000..e32f95e183 --- /dev/null +++ standalone-metastore/src/main/scripts/metastore-config.sh @@ -0,0 +1,69 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
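+# (Usage sketch with hypothetical paths: because base sources this file before
+# its own argument parsing, leading options are consumed here, e.g.
+#   base --config /etc/metastore/conf --service metastore
+# reads configuration from the given directory instead of $METASTORE_HOME/conf,
+# and a leading --auxpath /opt/serde-jars adds extra jars to the aux classpath.)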
+ +# +# processes --config option from command line +# + +this="$0" +while [ -h "$this" ]; do + ls=`ls -ld "$this"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '.*/.*' > /dev/null; then + this="$link" + else + this=`dirname "$this"`/"$link" + fi +done + +# convert relative path to absolute path +# bin is set from calling script +script=`basename "$this"` +this="$bin/$script" + +# the root of the Hive installation +if [[ -z $METASTORE_HOME ]] ; then + export METASTORE_HOME=`dirname "$bin"` +fi + +#check to see if the conf dir is given as an optional argument +while [ $# -gt 0 ]; do # Until you run out of parameters . . . + case "$1" in + --config) + shift + confdir=$1 + shift + METASTORE_CONF_DIR=$confdir + ;; + --auxpath) + shift + METASTORE_AUX_JARS_PATH=$1 + shift + ;; + *) + break; + ;; + esac +done + + +# Allow alternate conf dir location. +METASTORE_CONF_DIR="${METASTORE_CONF_DIR:-$METASTORE_HOME/conf}" + +export METASTORE_CONF_DIR=$METASTORE_CONF_DIR +export METASTORE_AUX_JARS_PATH=$METASTORE_AUX_JARS_PATH + +# Default to use 256MB +export HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-256} diff --git standalone-metastore/src/main/scripts/schematool standalone-metastore/src/main/scripts/schematool new file mode 100644 index 0000000000..c1ac13feab --- /dev/null +++ standalone-metastore/src/main/scripts/schematool @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +bin=`dirname "$0"` +bin=`cd "$bin" 2>&1 > /dev/null; pwd` + +. "$bin"/base --service schemaTool "$@" diff --git standalone-metastore/src/main/scripts/start-metastore standalone-metastore/src/main/scripts/start-metastore new file mode 100644 index 0000000000..71b1004ca3 --- /dev/null +++ standalone-metastore/src/main/scripts/start-metastore @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +bin=`dirname "$0"` +# The strange redirect on cd is because on some systems cd outputs the directory you changed to +bin=`cd "$bin" 2>&1 > /dev/null; pwd` + +. 
"$bin"/base --service metastore "$@" diff --git standalone-metastore/src/main/sql/derby/hive-schema-1.2.0.derby.sql standalone-metastore/src/main/sql/derby/hive-schema-1.2.0.derby.sql new file mode 100644 index 0000000000..43f61bfe23 --- /dev/null +++ standalone-metastore/src/main/sql/derby/hive-schema-1.2.0.derby.sql @@ -0,0 +1,405 @@ +-- Timestamp: 2011-09-22 15:32:02.024 +-- Source database is: /home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb +-- Connection URL is: jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb +-- Specified schema is: APP +-- appendLogs: false + +-- ---------------------------------------------- +-- DDL Statements for functions +-- ---------------------------------------------- + +CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ; + +CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ; + +-- ---------------------------------------------- +-- DDL Statements for tables +-- ---------------------------------------------- + +CREATE TABLE "APP"."DBS" ("DB_ID" BIGINT NOT NULL, "DESC" VARCHAR(4000), "DB_LOCATION_URI" VARCHAR(4000) NOT NULL, "NAME" VARCHAR(128), "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10)); + +CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT); + +CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); + +CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(128), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT); + +CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); + +CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(128), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL); + +CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" BIGINT NOT NULL, "PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128)); + +CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DEFERRED_REBUILD" CHAR(1) NOT NULL, "INDEX_HANDLER_CLASS" VARCHAR(4000), "INDEX_NAME" VARCHAR(128), "INDEX_TBL_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "ORIG_TBL_ID" BIGINT, "SD_ID" BIGINT); + +CREATE TABLE 
"APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); + +CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT); + +CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000)); + +CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128)); + +CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "ROLE_ID" BIGINT); + +CREATE TABLE "APP"."TYPES" ("TYPES_ID" BIGINT NOT NULL, "TYPE_NAME" VARCHAR(128), "TYPE1" VARCHAR(767), "TYPE2" VARCHAR(767)); + +CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "USER_PRIV" VARCHAR(128)); + +CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); + +CREATE TABLE "APP"."PARTITION_EVENTS" ("PART_NAME_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_TIME" BIGINT NOT NULL, "EVENT_TYPE" INTEGER NOT NULL, "PARTITION_NAME" VARCHAR(767), "TBL_NAME" VARCHAR(128)); + +CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128)); + +CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(128), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR); + +CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(128), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128)); + +CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), "IS_COMPRESSED" CHAR(1) NOT NULL, "LOCATION" VARCHAR(4000), "NUM_BUCKETS" INTEGER NOT NULL, "OUTPUT_FORMAT" VARCHAR(4000), "SERDE_ID" BIGINT, "CD_ID" BIGINT, "IS_STOREDASSUBDIRECTORIES" CHAR(1) NOT NULL); + +CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL); + +CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); + +CREATE TABLE "APP"."BUCKETING_COLS" ("SD_ID" BIGINT NOT NULL, "BUCKET_COL_NAME" VARCHAR(256), 
"INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."TYPE_FIELDS" ("TYPE_NAME" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "FIELD_NAME" VARCHAR(128) NOT NULL, "FIELD_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."NUCLEUS_TABLES" ("CLASS_NAME" VARCHAR(128) NOT NULL, "TABLE_NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(4) NOT NULL, "OWNER" VARCHAR(2) NOT NULL, "VERSION" VARCHAR(20) NOT NULL, "INTERFACE_NAME" VARCHAR(256) DEFAULT NULL); + +CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); + +CREATE TABLE "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID" BIGINT NOT NULL); + +CREATE TABLE "APP"."SKEWED_STRING_LIST_VALUES" ("STRING_LIST_ID" BIGINT NOT NULL, "STRING_LIST_VALUE" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."SKEWED_COL_NAMES" ("SD_ID" BIGINT NOT NULL, "SKEWED_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ("SD_ID" BIGINT NOT NULL, "STRING_LIST_ID_KID" BIGINT NOT NULL, "LOCATION" VARCHAR(4000)); + +CREATE TABLE "APP"."SKEWED_VALUES" ("SD_ID_OID" BIGINT NOT NULL, "STRING_LIST_ID_EID" BIGINT NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as identity (start with 1), "MASTER_KEY" VARCHAR(767)); + +CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767)); + +CREATE TABLE "APP"."TAB_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(128) NOT NULL, "COLUMN_NAME" VARCHAR(128) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL); + +CREATE TABLE "APP"."PART_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(128) NOT NULL, "PARTITION_NAME" VARCHAR(767) NOT NULL, "COLUMN_NAME" VARCHAR(128) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "PART_ID" BIGINT NOT NULL); + +CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255)); + +CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000), "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "FUNC_NAME" VARCHAR(128), "FUNC_TYPE" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10)); + +CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."NOTIFICATION_LOG" ("NL_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_ID" BIGINT NOT NULL, "EVENT_TIME" INTEGER NOT NULL, "EVENT_TYPE" VARCHAR(32) NOT NULL, "MESSAGE" LONG VARCHAR, "TBL_NAME" VARCHAR(128)); + +CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL); 
+ +-- ---------------------------------------------- +-- DDL Statements for indexes +-- ---------------------------------------------- + +CREATE UNIQUE INDEX "APP"."UNIQUEINDEX" ON "APP"."IDXS" ("INDEX_NAME", "ORIG_TBL_ID"); + +CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); + +CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME"); + +CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME"); + +CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."GLOBALPRIVILEGEINDEX" ON "APP"."GLOBAL_PRIVS" ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_TYPE" ON "APP"."TYPES" ("TYPE_NAME"); + +CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("PART_NAME", "TBL_ID"); + +CREATE UNIQUE INDEX "APP"."UNIQUEFUNCTION" ON "APP"."FUNCS" ("FUNC_NAME", "DB_ID"); + +CREATE INDEX "APP"."FUNCS_N49" ON "APP"."FUNCS" ("DB_ID"); + +CREATE INDEX "APP"."FUNC_RU_N49" ON "APP"."FUNC_RU" ("FUNC_ID"); + +-- ---------------------------------------------- +-- DDL Statements for keys +-- ---------------------------------------------- + +-- primary/unique +ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_PK" PRIMARY KEY ("INDEX_ID"); + +ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_PK" PRIMARY KEY ("TBL_COLUMN_GRANT_ID"); + +ALTER TABLE "APP"."CDS" ADD CONSTRAINT "SQL110922153006460" PRIMARY KEY ("CD_ID"); + +ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_PK" PRIMARY KEY ("DB_GRANT_ID"); + +ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_PK" PRIMARY KEY ("INDEX_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEY_PK" PRIMARY KEY ("TBL_ID", "PKEY_NAME"); + +ALTER TABLE "APP"."SEQUENCE_TABLE" ADD CONSTRAINT "SEQUENCE_TABLE_PK" PRIMARY KEY ("SEQUENCE_NAME"); + +ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_PK" PRIMARY KEY ("PART_GRANT_ID"); + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_PK" PRIMARY KEY ("SD_ID"); + +ALTER TABLE "APP"."SERDES" ADD CONSTRAINT "SERDES_PK" PRIMARY KEY ("SERDE_ID"); + +ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_PK" PRIMARY KEY ("SD_ID", "COLUMN_NAME"); + +ALTER TABLE "APP"."PARTITION_EVENTS" ADD CONSTRAINT "PARTITION_EVENTS_PK" PRIMARY KEY ("PART_NAME_ID"); + +ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_PK" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME"); + +ALTER TABLE "APP"."ROLES" ADD CONSTRAINT "ROLES_PK" PRIMARY KEY ("ROLE_ID"); + 
+ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_PK" PRIMARY KEY ("TBL_GRANT_ID"); + +ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_PK" PRIMARY KEY ("SERDE_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."NUCLEUS_TABLES" ADD CONSTRAINT "NUCLEUS_TABLES_PK" PRIMARY KEY ("CLASS_NAME"); + +ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_PK" PRIMARY KEY ("TBL_ID"); + +ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_PK" PRIMARY KEY ("SD_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_PK" PRIMARY KEY ("DB_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_PK" PRIMARY KEY ("DB_ID"); + +ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_PK" PRIMARY KEY ("ROLE_GRANT_ID"); + +ALTER TABLE "APP"."GLOBAL_PRIVS" ADD CONSTRAINT "GLOBAL_PRIVS_PK" PRIMARY KEY ("USER_GRANT_ID"); + +ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_PK" PRIMARY KEY ("PART_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."TYPES" ADD CONSTRAINT "TYPES_PK" PRIMARY KEY ("TYPES_ID"); + +ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "SQL110922153006740" PRIMARY KEY ("CD_ID", "COLUMN_NAME"); + +ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_PK" PRIMARY KEY ("PART_COLUMN_GRANT_ID"); + +ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_PK" PRIMARY KEY ("PART_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_PK" PRIMARY KEY ("PART_ID"); + +ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_PK" PRIMARY KEY ("TBL_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."SKEWED_STRING_LIST" ADD CONSTRAINT "SKEWED_STRING_LIST_PK" PRIMARY KEY ("STRING_LIST_ID"); + +ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_PK" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_PK" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID"); + +ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_PK" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX"); + +ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_PK" PRIMARY KEY ("CS_ID"); + +ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_PK" PRIMARY KEY ("CS_ID"); + +ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_PK" PRIMARY KEY ("FUNC_ID"); + +ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_PK" PRIMARY KEY ("FUNC_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."NOTIFICATION_LOG" ADD CONSTRAINT "NOTIFICATION_LOG_PK" PRIMARY KEY ("NL_ID"); + +ALTER TABLE "APP"."NOTIFICATION_SEQUENCE" ADD CONSTRAINT "NOTIFICATION_SEQUENCE_PK" PRIMARY KEY ("NNI_ID"); + +-- foreign +ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK3" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_FK1" 
FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_FK1" FOREIGN KEY ("INDEX_ID") REFERENCES "APP"."IDXS" ("INDEX_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEYS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK2" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_FK1" FOREIGN KEY ("TYPE_NAME") REFERENCES "APP"."TYPES" ("TYPES_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_FK1" FOREIGN KEY ("ROLE_ID") REFERENCES "APP"."ROLES" ("ROLE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "COLUMNS_V2_FK1" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON 
DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_FK1" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK1" FOREIGN KEY ("SD_ID_OID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK2" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_FK" FOREIGN KEY ("TBL_ID") REFERENCES TBLS("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_FK" FOREIGN KEY ("PART_ID") REFERENCES PARTITIONS("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."VERSION" ADD CONSTRAINT "VERSION_PK" PRIMARY KEY ("VER_ID"); + +ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "APP"."FUNCS" ("FUNC_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +-- ---------------------------------------------- +-- DDL Statements for checks +-- ---------------------------------------------- + +ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "SQL110318025504980" CHECK (DEFERRED_REBUILD IN ('Y','N')); + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SQL110318025505550" CHECK (IS_COMPRESSED IN ('Y','N')); + +-- ---------------------------- +-- Transaction and Lock Tables +-- ---------------------------- +CREATE TABLE TXNS ( + TXN_ID bigint PRIMARY KEY, + TXN_STATE char(1) NOT NULL, + TXN_STARTED bigint NOT NULL, + TXN_LAST_HEARTBEAT bigint NOT NULL, + TXN_USER varchar(128) NOT NULL, + TXN_HOST varchar(128) NOT NULL +); + +CREATE TABLE TXN_COMPONENTS ( + TC_TXNID bigint REFERENCES TXNS (TXN_ID), + TC_DATABASE varchar(128) NOT NULL, + TC_TABLE varchar(128), + TC_PARTITION varchar(767) +); + +CREATE TABLE COMPLETED_TXN_COMPONENTS ( + CTC_TXNID bigint, + CTC_DATABASE varchar(128) NOT NULL, + CTC_TABLE varchar(128), + CTC_PARTITION varchar(767) +); + +CREATE TABLE NEXT_TXN_ID ( + NTXN_NEXT bigint NOT NULL +); +INSERT INTO NEXT_TXN_ID VALUES(1); + +CREATE 
TABLE HIVE_LOCKS ( + HL_LOCK_EXT_ID bigint NOT NULL, + HL_LOCK_INT_ID bigint NOT NULL, + HL_TXNID bigint, + HL_DB varchar(128) NOT NULL, + HL_TABLE varchar(128), + HL_PARTITION varchar(767), + HL_LOCK_STATE char(1) NOT NULL, + HL_LOCK_TYPE char(1) NOT NULL, + HL_LAST_HEARTBEAT bigint NOT NULL, + HL_ACQUIRED_AT bigint, + HL_USER varchar(128) NOT NULL, + HL_HOST varchar(128) NOT NULL, + PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID) +); + +CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID); + +CREATE TABLE NEXT_LOCK_ID ( + NL_NEXT bigint NOT NULL +); +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE COMPACTION_QUEUE ( + CQ_ID bigint PRIMARY KEY, + CQ_DATABASE varchar(128) NOT NULL, + CQ_TABLE varchar(128) NOT NULL, + CQ_PARTITION varchar(767), + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_WORKER_ID varchar(128), + CQ_START bigint, + CQ_RUN_AS varchar(128) +); + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( + NCQ_NEXT bigint NOT NULL +); +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + + + + +-- ----------------------------------------------------------------- +-- Record schema version. Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '1.2.0', 'Hive release version 1.2.0'); + diff --git standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql new file mode 100644 index 0000000000..ac28869946 --- /dev/null +++ standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql @@ -0,0 +1,531 @@ +-- Timestamp: 2011-09-22 15:32:02.024 +-- Source database is: /home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb +-- Connection URL is: jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb +-- Specified schema is: APP +-- appendLogs: false + +-- ---------------------------------------------- +-- DDL Statements for functions +-- ---------------------------------------------- + +CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ; + +CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ; + +-- ---------------------------------------------- +-- DDL Statements for tables +-- ---------------------------------------------- + +CREATE TABLE "APP"."DBS" ("DB_ID" BIGINT NOT NULL, "DESC" VARCHAR(4000), "DB_LOCATION_URI" VARCHAR(4000) NOT NULL, "NAME" VARCHAR(128), "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10)); + +CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT); + +CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); + +CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), 
"PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT); + +CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB); + +CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(767) NOT NULL, "TYPE_NAME" CLOB, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL); + +CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" BIGINT NOT NULL, "PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128)); + +CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DEFERRED_REBUILD" CHAR(1) NOT NULL, "INDEX_HANDLER_CLASS" VARCHAR(4000), "INDEX_NAME" VARCHAR(128), "INDEX_TBL_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "ORIG_TBL_ID" BIGINT, "SD_ID" BIGINT); + +CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); + +CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT); + +CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000)); + +CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128)); + +CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "ROLE_ID" BIGINT); + +CREATE TABLE "APP"."TYPES" ("TYPES_ID" BIGINT NOT NULL, "TYPE_NAME" VARCHAR(128), "TYPE1" VARCHAR(767), "TYPE2" VARCHAR(767)); + +CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "USER_PRIV" VARCHAR(128)); + +CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); + +CREATE TABLE "APP"."PARTITION_EVENTS" ("PART_NAME_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_TIME" BIGINT NOT NULL, "EVENT_TYPE" INTEGER NOT NULL, "PARTITION_NAME" VARCHAR(767), "TBL_NAME" VARCHAR(256)); + +CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128)); + +CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT 
NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N'); + +CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128)); + +CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), "IS_COMPRESSED" CHAR(1) NOT NULL, "LOCATION" VARCHAR(4000), "NUM_BUCKETS" INTEGER NOT NULL, "OUTPUT_FORMAT" VARCHAR(4000), "SERDE_ID" BIGINT, "CD_ID" BIGINT, "IS_STOREDASSUBDIRECTORIES" CHAR(1) NOT NULL); + +CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL); + +CREATE TABLE "APP"."TAB_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(256) NOT NULL, "COLUMN_NAME" VARCHAR(767) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000),"BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, "BIT_VECTOR" BLOB); + +CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB); + +CREATE TABLE "APP"."BUCKETING_COLS" ("SD_ID" BIGINT NOT NULL, "BUCKET_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."TYPE_FIELDS" ("TYPE_NAME" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "FIELD_NAME" VARCHAR(128) NOT NULL, "FIELD_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."NUCLEUS_TABLES" ("CLASS_NAME" VARCHAR(128) NOT NULL, "TABLE_NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(4) NOT NULL, "OWNER" VARCHAR(2) NOT NULL, "VERSION" VARCHAR(20) NOT NULL, "INTERFACE_NAME" VARCHAR(256) DEFAULT NULL); + +CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB); + +CREATE TABLE "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID" BIGINT NOT NULL); + +CREATE TABLE "APP"."SKEWED_STRING_LIST_VALUES" ("STRING_LIST_ID" BIGINT NOT NULL, "STRING_LIST_VALUE" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."SKEWED_COL_NAMES" ("SD_ID" BIGINT NOT NULL, "SKEWED_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ("SD_ID" BIGINT NOT NULL, "STRING_LIST_ID_KID" BIGINT NOT NULL, "LOCATION" VARCHAR(4000)); + +CREATE TABLE "APP"."SKEWED_VALUES" ("SD_ID_OID" BIGINT NOT NULL, "STRING_LIST_ID_EID" BIGINT NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as identity (start with 1), "MASTER_KEY" VARCHAR(767)); + +CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767)); + +CREATE TABLE "APP"."PART_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(256) NOT NULL, "PARTITION_NAME" VARCHAR(767) NOT 
NULL, "COLUMN_NAME" VARCHAR(767) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "BIT_VECTOR" BLOB, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "PART_ID" BIGINT NOT NULL); + +CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255)); + +CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000), "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "FUNC_NAME" VARCHAR(128), "FUNC_TYPE" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10)); + +CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."NOTIFICATION_LOG" ("NL_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_ID" BIGINT NOT NULL, "EVENT_TIME" INTEGER NOT NULL, "EVENT_TYPE" VARCHAR(32) NOT NULL, "MESSAGE" CLOB, "TBL_NAME" VARCHAR(256), "MESSAGE_FORMAT" VARCHAR(16)); + +CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL); + +CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" INTEGER, "CHILD_TBL_ID" BIGINT, "PARENT_CD_ID" BIGINT NOT NULL, "PARENT_INTEGER_IDX" INTEGER, "PARENT_TBL_ID" BIGINT NOT NULL, "POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL); + +CREATE TABLE "APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY" VARCHAR(255) NOT NULL, "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, "DESCRIPTION" VARCHAR(1000)); + +CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL, DEFAULT_POOL_ID BIGINT); + +CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024)); + +CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024), IS_IN_UNMANAGED INTEGER NOT NULL DEFAULT 0); + +CREATE TABLE "APP"."WM_POOL_TO_TRIGGER" (POOL_ID BIGINT NOT NULL, TRIGGER_ID BIGINT NOT NULL); + +CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, ENTITY_TYPE VARCHAR(128) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID BIGINT, ORDERING INTEGER); + +CREATE TABLE "APP"."MV_CREATION_METADATA" ( + "MV_CREATION_METADATA_ID" BIGINT NOT NULL, + "DB_NAME" VARCHAR(128) NOT NULL, + "TBL_NAME" VARCHAR(256) NOT NULL, + "TXN_LIST" CLOB +); + +CREATE TABLE "APP"."MV_TABLES_USED" ( + "MV_CREATION_METADATA_ID" BIGINT NOT NULL, + "TBL_ID" BIGINT NOT NULL +); + +-- ---------------------------------------------- +-- DML Statements +-- ---------------------------------------------- + +INSERT INTO "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT * FROM (VALUES (1,1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "APP"."NOTIFICATION_SEQUENCE"); + +-- ---------------------------------------------- +-- DDL Statements for indexes +-- 
---------------------------------------------- + +CREATE UNIQUE INDEX "APP"."UNIQUEINDEX" ON "APP"."IDXS" ("INDEX_NAME", "ORIG_TBL_ID"); + +CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); + +CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME"); + +CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME"); + +CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."GLOBALPRIVILEGEINDEX" ON "APP"."GLOBAL_PRIVS" ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_TYPE" ON "APP"."TYPES" ("TYPE_NAME"); + +CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("PART_NAME", "TBL_ID"); + +CREATE UNIQUE INDEX "APP"."UNIQUEFUNCTION" ON "APP"."FUNCS" ("FUNC_NAME", "DB_ID"); + +CREATE INDEX "APP"."FUNCS_N49" ON "APP"."FUNCS" ("DB_ID"); + +CREATE INDEX "APP"."FUNC_RU_N49" ON "APP"."FUNC_RU" ("FUNC_ID"); + +CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID"); + +CREATE INDEX "APP"."CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "APP"."KEY_CONSTRAINTS"("CONSTRAINT_TYPE"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" ("NAME"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_POOL" ON "APP"."WM_POOL" ("RP_ID", "PATH"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_TRIGGER" ON "APP"."WM_TRIGGER" ("RP_ID", "NAME"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_MAPPING" ON "APP"."WM_MAPPING" ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME"); + +CREATE UNIQUE INDEX "APP"."MV_UNIQUE_TABLE" ON "APP"."MV_CREATION_METADATA" ("TBL_NAME", "DB_NAME"); + +-- ---------------------------------------------- +-- DDL Statements for keys +-- ---------------------------------------------- + +-- primary/unique +ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_PK" PRIMARY KEY ("INDEX_ID"); + +ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_PK" PRIMARY KEY ("TBL_COLUMN_GRANT_ID"); + +ALTER TABLE "APP"."CDS" ADD CONSTRAINT "SQL110922153006460" PRIMARY KEY ("CD_ID"); + +ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_PK" PRIMARY KEY ("DB_GRANT_ID"); + +ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_PK" PRIMARY KEY ("INDEX_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEY_PK" PRIMARY KEY ("TBL_ID", "PKEY_NAME"); + +ALTER TABLE "APP"."SEQUENCE_TABLE" ADD CONSTRAINT "SEQUENCE_TABLE_PK" PRIMARY KEY ("SEQUENCE_NAME"); + +ALTER TABLE "APP"."PART_PRIVS" ADD 
CONSTRAINT "PART_PRIVS_PK" PRIMARY KEY ("PART_GRANT_ID"); + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_PK" PRIMARY KEY ("SD_ID"); + +ALTER TABLE "APP"."SERDES" ADD CONSTRAINT "SERDES_PK" PRIMARY KEY ("SERDE_ID"); + +ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_PK" PRIMARY KEY ("SD_ID", "COLUMN_NAME"); + +ALTER TABLE "APP"."PARTITION_EVENTS" ADD CONSTRAINT "PARTITION_EVENTS_PK" PRIMARY KEY ("PART_NAME_ID"); + +ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_PK" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME"); + +ALTER TABLE "APP"."ROLES" ADD CONSTRAINT "ROLES_PK" PRIMARY KEY ("ROLE_ID"); + +ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_PK" PRIMARY KEY ("TBL_GRANT_ID"); + +ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_PK" PRIMARY KEY ("SERDE_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."NUCLEUS_TABLES" ADD CONSTRAINT "NUCLEUS_TABLES_PK" PRIMARY KEY ("CLASS_NAME"); + +ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_PK" PRIMARY KEY ("TBL_ID"); + +ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_PK" PRIMARY KEY ("SD_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_PK" PRIMARY KEY ("DB_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_PK" PRIMARY KEY ("DB_ID"); + +ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_PK" PRIMARY KEY ("ROLE_GRANT_ID"); + +ALTER TABLE "APP"."GLOBAL_PRIVS" ADD CONSTRAINT "GLOBAL_PRIVS_PK" PRIMARY KEY ("USER_GRANT_ID"); + +ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_PK" PRIMARY KEY ("PART_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."TYPES" ADD CONSTRAINT "TYPES_PK" PRIMARY KEY ("TYPES_ID"); + +ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "SQL110922153006740" PRIMARY KEY ("CD_ID", "COLUMN_NAME"); + +ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_PK" PRIMARY KEY ("PART_COLUMN_GRANT_ID"); + +ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_PK" PRIMARY KEY ("PART_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_PK" PRIMARY KEY ("PART_ID"); + +ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_PK" PRIMARY KEY ("TBL_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."SKEWED_STRING_LIST" ADD CONSTRAINT "SKEWED_STRING_LIST_PK" PRIMARY KEY ("STRING_LIST_ID"); + +ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_PK" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_PK" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID"); + +ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_PK" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX"); + +ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_PK" PRIMARY KEY ("CS_ID"); + +ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_PK" PRIMARY KEY ("CS_ID"); + +ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_PK" PRIMARY KEY ("FUNC_ID"); + +ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_PK" PRIMARY KEY ("FUNC_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."NOTIFICATION_LOG" ADD CONSTRAINT "NOTIFICATION_LOG_PK" PRIMARY KEY ("NL_ID"); + +ALTER TABLE "APP"."NOTIFICATION_SEQUENCE" ADD 
CONSTRAINT "NOTIFICATION_SEQUENCE_PK" PRIMARY KEY ("NNI_ID"); + +ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY ("CONSTRAINT_NAME", "POSITION"); + +ALTER TABLE "APP"."METASTORE_DB_PROPERTIES" ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY"); + +ALTER TABLE "APP"."MV_CREATION_METADATA" ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID"); + +-- foreign +ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK3" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_FK1" FOREIGN KEY ("INDEX_ID") REFERENCES "APP"."IDXS" ("INDEX_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEYS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK2" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_FK1" FOREIGN KEY ("TYPE_NAME") REFERENCES "APP"."TYPES" ("TYPES_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_FK1" FOREIGN KEY ("ROLE_ID") REFERENCES "APP"."ROLES" ("ROLE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."BUCKETING_COLS" ADD 
CONSTRAINT "BUCKETING_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "COLUMNS_V2_FK1" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_FK1" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK1" FOREIGN KEY ("SD_ID_OID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK2" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_FK" FOREIGN KEY ("TBL_ID") REFERENCES TBLS("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_FK" FOREIGN KEY ("PART_ID") REFERENCES PARTITIONS("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."VERSION" ADD CONSTRAINT "VERSION_PK" PRIMARY KEY ("VER_ID"); + +ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "APP"."FUNCS" ("FUNC_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_PK" PRIMARY KEY ("RP_ID"); + +ALTER TABLE "APP"."WM_POOL" 
ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID"); + +ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_PK" PRIMARY KEY ("TRIGGER_ID"); + +ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "APP"."WM_TRIGGER" ("TRIGGER_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_PK" PRIMARY KEY ("MAPPING_ID"); + +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN KEY ("MV_CREATION_METADATA_ID") REFERENCES "APP"."MV_CREATION_METADATA" ("MV_CREATION_METADATA_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +-- ---------------------------------------------- +-- DDL Statements for checks +-- ---------------------------------------------- + +ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "SQL110318025504980" CHECK (DEFERRED_REBUILD IN ('Y','N')); + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SQL110318025505550" CHECK (IS_COMPRESSED IN ('Y','N')); + +-- ---------------------------- +-- Transaction and Lock Tables +-- ---------------------------- +CREATE TABLE TXNS ( + TXN_ID bigint PRIMARY KEY, + TXN_STATE char(1) NOT NULL, + TXN_STARTED bigint NOT NULL, + TXN_LAST_HEARTBEAT bigint NOT NULL, + TXN_USER varchar(128) NOT NULL, + TXN_HOST varchar(128) NOT NULL, + TXN_AGENT_INFO varchar(128), + TXN_META_INFO varchar(128), + TXN_HEARTBEAT_COUNT integer +); + +CREATE TABLE TXN_COMPONENTS ( + TC_TXNID bigint REFERENCES TXNS (TXN_ID), + TC_DATABASE varchar(128) NOT NULL, + TC_TABLE varchar(128), + TC_PARTITION varchar(767), + TC_OPERATION_TYPE char(1) NOT NULL +); + +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); + +CREATE TABLE COMPLETED_TXN_COMPONENTS ( + CTC_TXNID bigint, + CTC_DATABASE varchar(128) NOT NULL, + CTC_TABLE varchar(256), + CTC_PARTITION varchar(767), + CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL +); + +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +CREATE TABLE NEXT_TXN_ID ( + NTXN_NEXT bigint NOT NULL +); +INSERT INTO NEXT_TXN_ID VALUES(1); + +CREATE TABLE HIVE_LOCKS ( + HL_LOCK_EXT_ID bigint NOT NULL, + HL_LOCK_INT_ID bigint NOT NULL, + HL_TXNID bigint, + HL_DB varchar(128) NOT NULL, + HL_TABLE varchar(128), + HL_PARTITION varchar(767), + 
HL_LOCK_STATE char(1) NOT NULL, + HL_LOCK_TYPE char(1) NOT NULL, + HL_LAST_HEARTBEAT bigint NOT NULL, + HL_ACQUIRED_AT bigint, + HL_USER varchar(128) NOT NULL, + HL_HOST varchar(128) NOT NULL, + HL_HEARTBEAT_COUNT integer, + HL_AGENT_INFO varchar(128), + HL_BLOCKEDBY_EXT_ID bigint, + HL_BLOCKEDBY_INT_ID bigint, + PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID) +); + +CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID); + +CREATE TABLE NEXT_LOCK_ID ( + NL_NEXT bigint NOT NULL +); +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE COMPACTION_QUEUE ( + CQ_ID bigint PRIMARY KEY, + CQ_DATABASE varchar(128) NOT NULL, + CQ_TABLE varchar(128) NOT NULL, + CQ_PARTITION varchar(767), + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_TBLPROPERTIES varchar(2048), + CQ_WORKER_ID varchar(128), + CQ_START bigint, + CQ_RUN_AS varchar(128), + CQ_HIGHEST_TXN_ID bigint, + CQ_META_INFO varchar(2048) for bit data, + CQ_HADOOP_JOB_ID varchar(32) +); + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( + NCQ_NEXT bigint NOT NULL +); +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID bigint PRIMARY KEY, + CC_DATABASE varchar(128) NOT NULL, + CC_TABLE varchar(128) NOT NULL, + CC_PARTITION varchar(767), + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_TBLPROPERTIES varchar(2048), + CC_WORKER_ID varchar(128), + CC_START bigint, + CC_END bigint, + CC_RUN_AS varchar(128), + CC_HIGHEST_TXN_ID bigint, + CC_META_INFO varchar(2048) for bit data, + CC_HADOOP_JOB_ID varchar(32) +); + +CREATE TABLE AUX_TABLE ( + MT_KEY1 varchar(128) NOT NULL, + MT_KEY2 bigint NOT NULL, + MT_COMMENT varchar(255), + PRIMARY KEY(MT_KEY1, MT_KEY2) +); + +--The first 4 columns make up a PK, but since WS_PARTITION is nullable we can't declare such a PK +--This is a good candidate for an index-organized table +CREATE TABLE WRITE_SET ( + WS_DATABASE varchar(128) NOT NULL, + WS_TABLE varchar(128) NOT NULL, + WS_PARTITION varchar(767), + WS_TXNID bigint NOT NULL, + WS_COMMIT_ID bigint NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +); + +-- ----------------------------------------------------------------- +-- Record schema version. 
Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '3.0.0', 'Hive release version 3.0.0'); diff --git standalone-metastore/src/main/sql/derby/upgrade-1.2.0-to-2.0.0.derby.sql standalone-metastore/src/main/sql/derby/upgrade-1.2.0-to-2.0.0.derby.sql new file mode 100644 index 0000000000..c7cf8cb5f7 --- /dev/null +++ standalone-metastore/src/main/sql/derby/upgrade-1.2.0-to-2.0.0.derby.sql @@ -0,0 +1,62 @@ +-- Upgrade MetaStore schema from 1.2.0 to 2.0.0 +--RUN '021-HIVE-11970.derby.sql'; +ALTER TABLE "COLUMNS_V2" ALTER "COLUMN_NAME" SET DATA TYPE VARCHAR(1000); +ALTER TABLE "PART_COL_PRIVS" ALTER "COLUMN_NAME" SET DATA TYPE VARCHAR(1000); +ALTER TABLE "TBL_COL_PRIVS" ALTER "COLUMN_NAME" SET DATA TYPE VARCHAR(1000); +ALTER TABLE "SORT_COLS" ALTER "COLUMN_NAME" SET DATA TYPE VARCHAR(1000); +ALTER TABLE "TAB_COL_STATS" ALTER "COLUMN_NAME" SET DATA TYPE VARCHAR(1000); +ALTER TABLE "PART_COL_STATS" ALTER "COLUMN_NAME" SET DATA TYPE VARCHAR(1000); + +--RUN '023-HIVE-12807.derby.sql'; +ALTER TABLE "COMPACTION_QUEUE" ADD "CQ_HIGHEST_TXN_ID" bigint; + +--RUN '024-HIVE-12814.derby.sql'; +ALTER TABLE "COMPACTION_QUEUE" ADD "CQ_META_INFO" varchar(2048) for bit data; + +--RUN '025-HIVE-12816.derby.sql'; +ALTER TABLE "COMPACTION_QUEUE" ADD "CQ_HADOOP_JOB_ID" VARCHAR(32); + +--RUN '026-HIVE-12818.derby.sql'; +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID bigint PRIMARY KEY, + CC_DATABASE varchar(128) NOT NULL, + CC_TABLE varchar(128) NOT NULL, + CC_PARTITION varchar(767), + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_WORKER_ID varchar(128), + CC_START bigint, + CC_END bigint, + CC_RUN_AS varchar(128), + CC_HIGHEST_TXN_ID bigint, + CC_META_INFO varchar(2048) for bit data, + CC_HADOOP_JOB_ID varchar(32) +); + +--RUN '027-HIVE-12819.derby.sql'; +ALTER TABLE "TXNS" ADD "TXN_AGENT_INFO" varchar(128); + +--RUN '028-HIVE-12821.derby.sql'; +ALTER TABLE "TXNS" ADD "TXN_HEARTBEAT_COUNT" INTEGER; +ALTER TABLE "HIVE_LOCKS" ADD "HL_HEARTBEAT_COUNT" INTEGER; + +--RUN '029-HIVE-12822.derby.sql'; +ALTER TABLE "TXNS" ADD "TXN_META_INFO" varchar(128); + +--RUN '030-HIVE-12823.derby.sql'; +ALTER TABLE "HIVE_LOCKS" ADD "HL_AGENT_INFO" varchar(128); + +--RUN '031-HIVE-12831.derby.sql'; +ALTER TABLE "HIVE_LOCKS" ADD "HL_BLOCKEDBY_EXT_ID" bigint; +ALTER TABLE "HIVE_LOCKS" ADD "HL_BLOCKEDBY_INT_ID" bigint; + +--RUN '032-HIVE-12832.derby.sql'; +CREATE TABLE AUX_TABLE ( + MT_KEY1 varchar(128) NOT NULL, + MT_KEY2 bigint NOT NULL, + MT_COMMENT varchar(255), + PRIMARY KEY(MT_KEY1, MT_KEY2) +); + + +UPDATE "APP".VERSION SET SCHEMA_VERSION='2.0.0', VERSION_COMMENT='Hive release version 2.0.0' where VER_ID=1; diff --git standalone-metastore/src/main/sql/derby/upgrade-2.0.0-to-2.1.0.derby.sql standalone-metastore/src/main/sql/derby/upgrade-2.0.0-to-2.1.0.derby.sql new file mode 100644 index 0000000000..b7155c59ed --- /dev/null +++ standalone-metastore/src/main/sql/derby/upgrade-2.0.0-to-2.1.0.derby.sql @@ -0,0 +1,22 @@ +-- Upgrade MetaStore schema from 2.0.0 to 2.1.0 +--RUN '034-HIVE-13076.derby.sql'; +CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" INTEGER, "CHILD_TBL_ID" BIGINT, "PARENT_CD_ID" BIGINT NOT NULL, "PARENT_INTEGER_IDX" INTEGER NOT NULL, "PARENT_TBL_ID" BIGINT NOT NULL, "POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" SMALLINT, 
"ENABLE_VALIDATE_RELY" SMALLINT NOT NULL); +ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY ("CONSTRAINT_NAME", "POSITION"); +CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID"); + +--RUN '035-HIVE-13395.derby.sql'; +CREATE TABLE WRITE_SET ( + WS_DATABASE varchar(128) NOT NULL, + WS_TABLE varchar(128) NOT NULL, + WS_PARTITION varchar(767), + WS_TXNID bigint NOT NULL, + WS_COMMIT_ID bigint NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +); +ALTER TABLE TXN_COMPONENTS ADD TC_OPERATION_TYPE char(1); + +--RUN '036-HIVE-13354.derby.sql'; +ALTER TABLE COMPACTION_QUEUE ADD CQ_TBLPROPERTIES varchar(2048); +ALTER TABLE COMPLETED_COMPACTIONS ADD CC_TBLPROPERTIES varchar(2048); + +UPDATE "APP".VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1; diff --git standalone-metastore/src/main/sql/derby/upgrade-2.1.0-to-2.2.0.derby.sql standalone-metastore/src/main/sql/derby/upgrade-2.1.0-to-2.2.0.derby.sql new file mode 100644 index 0000000000..23bb9d451e --- /dev/null +++ standalone-metastore/src/main/sql/derby/upgrade-2.1.0-to-2.2.0.derby.sql @@ -0,0 +1,59 @@ +-- Upgrade MetaStore schema from 2.1.0 to 2.2.0 +--RUN '037-HIVE-14496.derby.sql'; +-- Step 1: Add the column allowing null +ALTER TABLE "APP"."TBLS" ADD "IS_REWRITE_ENABLED" CHAR(1); + + -- Step 2: Replace the null with default value (false) +UPDATE "APP"."TBLS" SET "IS_REWRITE_ENABLED" = 'N'; + +-- Step 3: Alter the column to disallow null values +ALTER TABLE "APP"."TBLS" ALTER COLUMN "IS_REWRITE_ENABLED" SET DEFAULT 'N'; +ALTER TABLE "APP"."TBLS" ALTER COLUMN "IS_REWRITE_ENABLED" NOT NULL; + +--RUN '038-HIVE-10562.derby.sql'; +-- Step 1: Add the column for format +ALTER TABLE "APP"."NOTIFICATION_LOG" ADD "MESSAGE_FORMAT" varchar(16); + +-- Step 2 : Change the type of the MESSAGE field from long varchar to clob +ALTER TABLE "APP"."NOTIFICATION_LOG" ADD COLUMN "MESSAGE_CLOB" CLOB; +UPDATE "APP"."NOTIFICATION_LOG" SET MESSAGE_CLOB=CAST(MESSAGE AS CLOB); +ALTER TABLE "APP"."NOTIFICATION_LOG" DROP COLUMN MESSAGE; +RENAME COLUMN "APP"."NOTIFICATION_LOG"."MESSAGE_CLOB" TO "MESSAGE"; + +-- ALTER TABLE "APP"."NOTIFICATION_LOG" ALTER COLUMN "MESSAGE" SET DATA TYPE CLOB; + +--RUN '039-HIVE-12274.derby.sql'; +-- add a new temp column,type clob, drop the old column and rename temp to old col +-- change COLUMNS_V2.TYPE_NAME to CLOB +ALTER TABLE "APP"."COLUMNS_V2" ADD COLUMN "TYPE_NAME_CLOB" CLOB; +UPDATE "APP"."COLUMNS_V2" SET TYPE_NAME_CLOB=CAST(TYPE_NAME AS CLOB); +ALTER TABLE "APP"."COLUMNS_V2" DROP COLUMN TYPE_NAME; +RENAME COLUMN "APP"."COLUMNS_V2"."TYPE_NAME_CLOB" TO "TYPE_NAME"; + +-- change TABLE_PARAMS.PARAM_VALUE to CLOB +ALTER TABLE "APP"."TABLE_PARAMS" ADD COLUMN "PARAM_VALUE_CLOB" CLOB; +UPDATE "APP"."TABLE_PARAMS" SET PARAM_VALUE_CLOB=CAST(PARAM_VALUE AS CLOB); +ALTER TABLE "APP"."TABLE_PARAMS" DROP COLUMN PARAM_VALUE; +RENAME COLUMN "APP"."TABLE_PARAMS"."PARAM_VALUE_CLOB" TO "PARAM_VALUE"; + +-- change SERDE_PARAMS.PARAM_VALUE to CLOB +ALTER TABLE "APP"."SERDE_PARAMS" ADD COLUMN "SERDE_PV_CLOB" CLOB; +UPDATE "APP"."SERDE_PARAMS" SET SERDE_PV_CLOB=CAST(PARAM_VALUE AS CLOB); +ALTER TABLE "APP"."SERDE_PARAMS" DROP COLUMN PARAM_VALUE; +RENAME COLUMN "APP"."SERDE_PARAMS"."SERDE_PV_CLOB" TO "PARAM_VALUE"; + +-- change SD_PARAMS.PARAM_VALUE to CLOB +ALTER TABLE "APP"."SD_PARAMS" ADD COLUMN "SD_PV_CLOB" CLOB; +UPDATE "APP"."SD_PARAMS" SET SD_PV_CLOB=CAST(PARAM_VALUE AS CLOB); +ALTER TABLE "APP"."SD_PARAMS" DROP COLUMN PARAM_VALUE; 
+RENAME COLUMN "APP"."SD_PARAMS"."SD_PV_CLOB" TO "PARAM_VALUE"; + +-- expand a hive table name length to 256 chars +ALTER TABLE "APP"."TBLS" ALTER COLUMN "TBL_NAME" SET DATA TYPE VARCHAR(256); +ALTER TABLE "APP"."NOTIFICATION_LOG" ALTER COLUMN "TBL_NAME" SET DATA TYPE VARCHAR(256); +ALTER TABLE "APP"."PARTITION_EVENTS" ALTER COLUMN "TBL_NAME" SET DATA TYPE VARCHAR(256); +ALTER TABLE "APP"."TAB_COL_STATS" ALTER COLUMN "TABLE_NAME" SET DATA TYPE VARCHAR(256); +ALTER TABLE "APP"."PART_COL_STATS" ALTER COLUMN "TABLE_NAME" SET DATA TYPE VARCHAR(256); +ALTER TABLE "APP"."COMPLETED_TXN_COMPONENTS" ALTER COLUMN "CTC_TABLE" SET DATA TYPE VARCHAR(256); + +UPDATE "APP".VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1; diff --git standalone-metastore/src/main/sql/derby/upgrade-2.2.0-to-2.3.0.derby.sql standalone-metastore/src/main/sql/derby/upgrade-2.2.0-to-2.3.0.derby.sql new file mode 100644 index 0000000000..8dcd0f6cd7 --- /dev/null +++ standalone-metastore/src/main/sql/derby/upgrade-2.2.0-to-2.3.0.derby.sql @@ -0,0 +1,5 @@ +-- Upgrade MetaStore schema from 2.2.0 to 2.3.0 +--RUN '040-HIVE-16399.derby.sql'; +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); + +UPDATE "APP".VERSION SET SCHEMA_VERSION='2.3.0', VERSION_COMMENT='Hive release version 2.3.0' where VER_ID=1; diff --git standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql new file mode 100644 index 0000000000..d49255a545 --- /dev/null +++ standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql @@ -0,0 +1,96 @@ +-- Upgrade MetaStore schema from 2.3.0 to 3.0.0 +--RUN '041-HIVE-16556.derby.sql'; +CREATE TABLE "APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY" VARCHAR(255) NOT NULL, "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, "DESCRIPTION" VARCHAR(1000)); + +ALTER TABLE "APP"."METASTORE_DB_PROPERTIES" ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY"); +--RUN '042-HIVE-16575.derby.sql'; +-- Remove the NOT NULL constraint from the CHILD_INTEGER_IDX column +ALTER TABLE "APP"."KEY_CONSTRAINTS" ALTER COLUMN "CHILD_INTEGER_IDX" NULL; + +CREATE INDEX "APP"."CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "APP"."KEY_CONSTRAINTS"("CONSTRAINT_TYPE"); +--RUN '043-HIVE-16922.derby.sql'; +UPDATE SERDE_PARAMS +SET PARAM_KEY='collection.delim' +WHERE PARAM_KEY='colelction.delim'; +--RUN '044-HIVE-16997.derby.sql'; +ALTER TABLE "APP"."PART_COL_STATS" ADD COLUMN "BIT_VECTOR" BLOB; +--RUN '045-HIVE-16886.derby.sql'; +INSERT INTO "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT * FROM (VALUES (1,1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "APP"."NOTIFICATION_SEQUENCE"); +--RUN '046-HIVE-17566.derby.sql'; +CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL, DEFAULT_POOL_ID BIGINT); +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" ("NAME"); +ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_PK" PRIMARY KEY ("RP_ID"); + +CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024)); +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_POOL" ON "APP"."WM_POOL" ("RP_ID", "PATH"); +ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID"); +ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY 
("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; +ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024), IS_IN_UNMANAGED INTEGER NOT NULL DEFAULT 0); +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_TRIGGER" ON "APP"."WM_TRIGGER" ("RP_ID", "NAME"); +ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_PK" PRIMARY KEY ("TRIGGER_ID"); +ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +CREATE TABLE "APP"."WM_POOL_TO_TRIGGER" (POOL_ID BIGINT NOT NULL, TRIGGER_ID BIGINT NOT NULL); +ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_PK" PRIMARY KEY ("POOL_ID", "TRIGGER_ID"); +ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; +ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "APP"."WM_TRIGGER" ("TRIGGER_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, ENTITY_TYPE VARCHAR(128) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID BIGINT, ORDERING INTEGER); +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_MAPPING" ON "APP"."WM_MAPPING" ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME"); +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_PK" PRIMARY KEY ("MAPPING_ID"); +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +UPDATE "APP".VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; + +-- 048-HIVE-14498 +-- create mv_creation_metadata table +CREATE TABLE "APP"."MV_CREATION_METADATA" ( + "MV_CREATION_METADATA_ID" BIGINT NOT NULL, + "DB_NAME" VARCHAR(128) NOT NULL, + "TBL_NAME" VARCHAR(256) NOT NULL, + "TXN_LIST" CLOB +); + +CREATE TABLE "APP"."MV_TABLES_USED" ( + "MV_CREATION_METADATA_ID" BIGINT NOT NULL, + "TBL_ID" BIGINT NOT NULL +); + +ALTER TABLE "APP"."MV_CREATION_METADATA" ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID"); + +CREATE UNIQUE INDEX "APP"."MV_UNIQUE_TABLE" ON "APP"."MV_CREATION_METADATA" ("TBL_NAME", "DB_NAME"); + +ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN KEY ("MV_CREATION_METADATA_ID") REFERENCES "APP"."MV_CREATION_METADATA" ("MV_CREATION_METADATA_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +-- modify completed_txn_components table +ALTER TABLE "APP"."COMPLETED_TXN_COMPONENTS" ADD "CTC_TIMESTAMP" timestamp; + +UPDATE "APP"."TBLS" SET "IS_REWRITE_ENABLED" = CURRENT_TIMESTAMP; + +ALTER TABLE "APP"."COMPLETED_TXN_COMPONENTS" ALTER COLUMN "CTC_TIMESTAMP" SET 
DEFAULT CURRENT_TIMESTAMP; + +ALTER TABLE "APP"."COMPLETED_TXN_COMPONENTS" ALTER COLUMN "CTC_TIMESTAMP" NOT NULL; + +CREATE INDEX "APP"."COMPLETED_TXN_COMPONENTS_IDX" ON "APP"."COMPLETED_TXN_COMPONENTS" ("CTC_DATABASE", "CTC_TABLE", "CTC_PARTITION"); + +-- 049-HIVE-18489.derby.sql +UPDATE FUNC_RU + SET RESOURCE_URI = 's3a' || SUBSTR(RESOURCE_URI, 4) + WHERE RESOURCE_URI LIKE 's3n://%' ; + +UPDATE SKEWED_COL_VALUE_LOC_MAP + SET LOCATION = 's3a' || SUBSTR(LOCATION, 4) + WHERE LOCATION LIKE 's3n://%' ; + +UPDATE SDS + SET LOCATION = 's3a' || SUBSTR(LOCATION, 4) + WHERE LOCATION LIKE 's3n://%' ; + +UPDATE DBS + SET DB_LOCATION_URI = 's3a' || SUBSTR(DB_LOCATION_URI, 4) + WHERE DB_LOCATION_URI LIKE 's3n://%' ; diff --git standalone-metastore/src/main/sql/derby/upgrade.order.derby standalone-metastore/src/main/sql/derby/upgrade.order.derby new file mode 100644 index 0000000000..d7091b5228 --- /dev/null +++ standalone-metastore/src/main/sql/derby/upgrade.order.derby @@ -0,0 +1,16 @@ +0.5.0-to-0.6.0 +0.6.0-to-0.7.0 +0.7.0-to-0.8.0 +0.8.0-to-0.9.0 +0.9.0-to-0.10.0 +0.10.0-to-0.11.0 +0.11.0-to-0.12.0 +0.12.0-to-0.13.0 +0.13.0-to-0.14.0 +0.14.0-to-1.1.0 +1.1.0-to-1.2.0 +1.2.0-to-2.0.0 +2.0.0-to-2.1.0 +2.1.0-to-2.2.0 +2.2.0-to-2.3.0 +2.3.0-to-3.0.0 diff --git standalone-metastore/src/main/sql/mssql/create-user.mssql.sql standalone-metastore/src/main/sql/mssql/create-user.mssql.sql new file mode 100644 index 0000000000..cb39118993 --- /dev/null +++ standalone-metastore/src/main/sql/mssql/create-user.mssql.sql @@ -0,0 +1,5 @@ +CREATE DATABASE _REPLACE_WITH_DB_; +use _REPLACE_WITH_DB_; +CREATE LOGIN _REPLACE_WITH_USER_ WITH PASSWORD='_REPLACE_WITH_PASSWD_'; +CREATE USER _REPLACE_WITH_USER_ FOR LOGIN _REPLACE_WITH_USER_; +ALTER ROLE db_owner ADD MEMBER _REPLACE_WITH_USER_; diff --git standalone-metastore/src/main/sql/mssql/hive-schema-1.2.0.mssql.sql standalone-metastore/src/main/sql/mssql/hive-schema-1.2.0.mssql.sql new file mode 100644 index 0000000000..0bbd647947 --- /dev/null +++ standalone-metastore/src/main/sql/mssql/hive-schema-1.2.0.mssql.sql @@ -0,0 +1,947 @@ +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
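-- A minimal sketch of create-user.mssql.sql above after substituting the
-- _REPLACE_WITH_* placeholders; the database, login, and password values here are
-- illustrative only:
-- CREATE DATABASE hive_meta;
-- use hive_meta;
-- CREATE LOGIN hive_user WITH PASSWORD='hive_password';
-- CREATE USER hive_user FOR LOGIN hive_user;
-- ALTER ROLE db_owner ADD MEMBER hive_user;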
+ +------------------------------------------------------------------ +-- DataNucleus SchemaTool (ran at 08/04/2014 15:10:15) +------------------------------------------------------------------ +-- Complete schema required for the following classes:- +-- org.apache.hadoop.hive.metastore.model.MColumnDescriptor +-- org.apache.hadoop.hive.metastore.model.MDBPrivilege +-- org.apache.hadoop.hive.metastore.model.MDatabase +-- org.apache.hadoop.hive.metastore.model.MDelegationToken +-- org.apache.hadoop.hive.metastore.model.MFieldSchema +-- org.apache.hadoop.hive.metastore.model.MFunction +-- org.apache.hadoop.hive.metastore.model.MGlobalPrivilege +-- org.apache.hadoop.hive.metastore.model.MIndex +-- org.apache.hadoop.hive.metastore.model.MMasterKey +-- org.apache.hadoop.hive.metastore.model.MOrder +-- org.apache.hadoop.hive.metastore.model.MPartition +-- org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege +-- org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics +-- org.apache.hadoop.hive.metastore.model.MPartitionEvent +-- org.apache.hadoop.hive.metastore.model.MPartitionPrivilege +-- org.apache.hadoop.hive.metastore.model.MResourceUri +-- org.apache.hadoop.hive.metastore.model.MRole +-- org.apache.hadoop.hive.metastore.model.MRoleMap +-- org.apache.hadoop.hive.metastore.model.MSerDeInfo +-- org.apache.hadoop.hive.metastore.model.MStorageDescriptor +-- org.apache.hadoop.hive.metastore.model.MStringList +-- org.apache.hadoop.hive.metastore.model.MTable +-- org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege +-- org.apache.hadoop.hive.metastore.model.MTableColumnStatistics +-- org.apache.hadoop.hive.metastore.model.MTablePrivilege +-- org.apache.hadoop.hive.metastore.model.MType +-- org.apache.hadoop.hive.metastore.model.MVersionTable +-- +-- Table MASTER_KEYS for classes [org.apache.hadoop.hive.metastore.model.MMasterKey] +CREATE TABLE MASTER_KEYS +( + KEY_ID int NOT NULL, + MASTER_KEY nvarchar(767) NULL +); + +ALTER TABLE MASTER_KEYS ADD CONSTRAINT MASTER_KEYS_PK PRIMARY KEY (KEY_ID); + +-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex] +CREATE TABLE IDXS +( + INDEX_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + DEFERRED_REBUILD bit NOT NULL, + INDEX_HANDLER_CLASS nvarchar(4000) NULL, + INDEX_NAME nvarchar(128) NULL, + INDEX_TBL_ID bigint NULL, + LAST_ACCESS_TIME int NOT NULL, + ORIG_TBL_ID bigint NULL, + SD_ID bigint NULL +); + +ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID); + +-- Table PART_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics] +CREATE TABLE PART_COL_STATS +( + CS_ID bigint NOT NULL, + AVG_COL_LEN float NULL, + "COLUMN_NAME" nvarchar(128) NOT NULL, + COLUMN_TYPE nvarchar(128) NOT NULL, + DB_NAME nvarchar(128) NOT NULL, + BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL, + BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL, + DOUBLE_HIGH_VALUE float NULL, + DOUBLE_LOW_VALUE float NULL, + LAST_ANALYZED bigint NOT NULL, + LONG_HIGH_VALUE bigint NULL, + LONG_LOW_VALUE bigint NULL, + MAX_COL_LEN bigint NULL, + NUM_DISTINCTS bigint NULL, + NUM_FALSES bigint NULL, + NUM_NULLS bigint NOT NULL, + NUM_TRUES bigint NULL, + PART_ID bigint NULL, + PARTITION_NAME nvarchar(767) NOT NULL, + "TABLE_NAME" nvarchar(128) NOT NULL +); + +ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID); + +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); + +-- Table PART_PRIVS for classes 
[org.apache.hadoop.hive.metastore.model.MPartitionPrivilege] +CREATE TABLE PART_PRIVS +( + PART_GRANT_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PART_ID bigint NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + PART_PRIV nvarchar(128) NULL +); + +ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID); + +-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList] +CREATE TABLE SKEWED_STRING_LIST +( + STRING_LIST_ID bigint NOT NULL +); + +ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID); + +-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole] +CREATE TABLE ROLES +( + ROLE_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + OWNER_NAME nvarchar(128) NULL, + ROLE_NAME nvarchar(128) NULL +); + +ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID); + +-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition] +CREATE TABLE PARTITIONS +( + PART_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + LAST_ACCESS_TIME int NOT NULL, + PART_NAME nvarchar(767) NULL, + SD_ID bigint NULL, + TBL_ID bigint NULL +); + +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID); + +-- Table CDS for classes [org.apache.hadoop.hive.metastore.model.MColumnDescriptor] +CREATE TABLE CDS +( + CD_ID bigint NOT NULL +); + +ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID); + +-- Table VERSION for classes [org.apache.hadoop.hive.metastore.model.MVersionTable] +CREATE TABLE VERSION +( + VER_ID bigint NOT NULL, + SCHEMA_VERSION nvarchar(127) NOT NULL, + VERSION_COMMENT nvarchar(255) NOT NULL +); + +ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID); + +-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege] +CREATE TABLE GLOBAL_PRIVS +( + USER_GRANT_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + USER_PRIV nvarchar(128) NULL +); + +ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID); + +-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege] +CREATE TABLE PART_COL_PRIVS +( + PART_COLUMN_GRANT_ID bigint NOT NULL, + "COLUMN_NAME" nvarchar(128) NULL, + CREATE_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PART_ID bigint NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + PART_COL_PRIV nvarchar(128) NULL +); + +ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID); + +-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege] +CREATE TABLE DB_PRIVS +( + DB_GRANT_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + DB_ID bigint NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + DB_PRIV nvarchar(128) NULL +); + +ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID); + +-- Table 
TAB_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics] +CREATE TABLE TAB_COL_STATS +( + CS_ID bigint NOT NULL, + AVG_COL_LEN float NULL, + "COLUMN_NAME" nvarchar(128) NOT NULL, + COLUMN_TYPE nvarchar(128) NOT NULL, + DB_NAME nvarchar(128) NOT NULL, + BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL, + BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL, + DOUBLE_HIGH_VALUE float NULL, + DOUBLE_LOW_VALUE float NULL, + LAST_ANALYZED bigint NOT NULL, + LONG_HIGH_VALUE bigint NULL, + LONG_LOW_VALUE bigint NULL, + MAX_COL_LEN bigint NULL, + NUM_DISTINCTS bigint NULL, + NUM_FALSES bigint NULL, + NUM_NULLS bigint NOT NULL, + NUM_TRUES bigint NULL, + TBL_ID bigint NULL, + "TABLE_NAME" nvarchar(128) NOT NULL +); + +ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID); + +-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType] +CREATE TABLE TYPES +( + TYPES_ID bigint NOT NULL, + TYPE_NAME nvarchar(128) NULL, + TYPE1 nvarchar(767) NULL, + TYPE2 nvarchar(767) NULL +); + +ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID); + +-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege] +CREATE TABLE TBL_PRIVS +( + TBL_GRANT_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + TBL_PRIV nvarchar(128) NULL, + TBL_ID bigint NULL +); + +ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID); + +-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase] +CREATE TABLE DBS +( + DB_ID bigint NOT NULL, + "DESC" nvarchar(4000) NULL, + DB_LOCATION_URI nvarchar(4000) NOT NULL, + "NAME" nvarchar(128) NULL, + OWNER_NAME nvarchar(128) NULL, + OWNER_TYPE nvarchar(10) NULL +); + +ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID); + +-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege] +CREATE TABLE TBL_COL_PRIVS +( + TBL_COLUMN_GRANT_ID bigint NOT NULL, + "COLUMN_NAME" nvarchar(128) NULL, + CREATE_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + TBL_COL_PRIV nvarchar(128) NULL, + TBL_ID bigint NULL +); + +ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID); + +-- Table DELEGATION_TOKENS for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken] +CREATE TABLE DELEGATION_TOKENS +( + TOKEN_IDENT nvarchar(767) NOT NULL, + TOKEN nvarchar(767) NULL +); + +ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (TOKEN_IDENT); + +-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo] +CREATE TABLE SERDES +( + SERDE_ID bigint NOT NULL, + "NAME" nvarchar(128) NULL, + SLIB nvarchar(4000) NULL +); + +ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID); + +-- Table FUNCS for classes [org.apache.hadoop.hive.metastore.model.MFunction] +CREATE TABLE FUNCS +( + FUNC_ID bigint NOT NULL, + CLASS_NAME nvarchar(4000) NULL, + CREATE_TIME int NOT NULL, + DB_ID bigint NULL, + FUNC_NAME nvarchar(128) NULL, + FUNC_TYPE int NOT NULL, + OWNER_NAME nvarchar(128) NULL, + OWNER_TYPE nvarchar(10) NULL +); + +ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID); + +-- Table 
ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap] +CREATE TABLE ROLE_MAP +( + ROLE_GRANT_ID bigint NOT NULL, + ADD_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + ROLE_ID bigint NULL +); + +ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID); + +-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable] +CREATE TABLE TBLS +( + TBL_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + DB_ID bigint NULL, + LAST_ACCESS_TIME int NOT NULL, + OWNER nvarchar(767) NULL, + RETENTION int NOT NULL, + SD_ID bigint NULL, + TBL_NAME nvarchar(128) NULL, + TBL_TYPE nvarchar(128) NULL, + VIEW_EXPANDED_TEXT text NULL, + VIEW_ORIGINAL_TEXT text NULL +); + +ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); + +-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor] +CREATE TABLE SDS +( + SD_ID bigint NOT NULL, + CD_ID bigint NULL, + INPUT_FORMAT nvarchar(4000) NULL, + IS_COMPRESSED bit NOT NULL, + IS_STOREDASSUBDIRECTORIES bit NOT NULL, + LOCATION nvarchar(4000) NULL, + NUM_BUCKETS int NOT NULL, + OUTPUT_FORMAT nvarchar(4000) NULL, + SERDE_ID bigint NULL +); + +ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID); + +-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent] +CREATE TABLE PARTITION_EVENTS +( + PART_NAME_ID bigint NOT NULL, + DB_NAME nvarchar(128) NULL, + EVENT_TIME bigint NOT NULL, + EVENT_TYPE int NOT NULL, + PARTITION_NAME nvarchar(767) NULL, + TBL_NAME nvarchar(128) NULL +); + +ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID); + +-- Table SORT_COLS for join relationship +CREATE TABLE SORT_COLS +( + SD_ID bigint NOT NULL, + "COLUMN_NAME" nvarchar(128) NULL, + "ORDER" int NOT NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +-- Table SKEWED_COL_NAMES for join relationship +CREATE TABLE SKEWED_COL_NAMES +( + SD_ID bigint NOT NULL, + SKEWED_COL_NAME nvarchar(255) NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +-- Table SKEWED_COL_VALUE_LOC_MAP for join relationship +CREATE TABLE SKEWED_COL_VALUE_LOC_MAP +( + SD_ID bigint NOT NULL, + STRING_LIST_ID_KID bigint NOT NULL, + LOCATION nvarchar(4000) NULL +); + +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID); + +-- Table SKEWED_STRING_LIST_VALUES for join relationship +CREATE TABLE SKEWED_STRING_LIST_VALUES +( + STRING_LIST_ID bigint NOT NULL, + STRING_LIST_VALUE nvarchar(255) NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX); + +-- Table PARTITION_KEY_VALS for join relationship +CREATE TABLE PARTITION_KEY_VALS +( + PART_ID bigint NOT NULL, + PART_KEY_VAL nvarchar(255) NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX); + +-- Table PARTITION_KEYS for join relationship +CREATE TABLE PARTITION_KEYS +( + TBL_ID bigint NOT NULL, + PKEY_COMMENT nvarchar(4000) NULL, + PKEY_NAME nvarchar(128) NOT NULL, + PKEY_TYPE nvarchar(767) NOT NULL, + INTEGER_IDX int NOT NULL +); + 
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME); + +-- Table SKEWED_VALUES for join relationship +CREATE TABLE SKEWED_VALUES +( + SD_ID_OID bigint NOT NULL, + STRING_LIST_ID_EID bigint NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX); + +-- Table SD_PARAMS for join relationship +CREATE TABLE SD_PARAMS +( + SD_ID bigint NOT NULL, + PARAM_KEY nvarchar(256) NOT NULL, + PARAM_VALUE nvarchar(4000) NULL +); + +ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY); + +-- Table FUNC_RU for join relationship +CREATE TABLE FUNC_RU +( + FUNC_ID bigint NOT NULL, + RESOURCE_TYPE int NOT NULL, + RESOURCE_URI nvarchar(4000) NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID,INTEGER_IDX); + +-- Table TYPE_FIELDS for join relationship +CREATE TABLE TYPE_FIELDS +( + TYPE_NAME bigint NOT NULL, + COMMENT nvarchar(256) NULL, + FIELD_NAME nvarchar(128) NOT NULL, + FIELD_TYPE nvarchar(767) NOT NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME); + +-- Table BUCKETING_COLS for join relationship +CREATE TABLE BUCKETING_COLS +( + SD_ID bigint NOT NULL, + BUCKET_COL_NAME nvarchar(255) NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +-- Table DATABASE_PARAMS for join relationship +CREATE TABLE DATABASE_PARAMS +( + DB_ID bigint NOT NULL, + PARAM_KEY nvarchar(180) NOT NULL, + PARAM_VALUE nvarchar(4000) NULL +); + +ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY); + +-- Table INDEX_PARAMS for join relationship +CREATE TABLE INDEX_PARAMS +( + INDEX_ID bigint NOT NULL, + PARAM_KEY nvarchar(256) NOT NULL, + PARAM_VALUE nvarchar(4000) NULL +); + +ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY); + +-- Table COLUMNS_V2 for join relationship +CREATE TABLE COLUMNS_V2 +( + CD_ID bigint NOT NULL, + COMMENT nvarchar(256) NULL, + "COLUMN_NAME" nvarchar(128) NOT NULL, + TYPE_NAME nvarchar(4000) NOT NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_PK PRIMARY KEY (CD_ID,"COLUMN_NAME"); + +-- Table SERDE_PARAMS for join relationship +CREATE TABLE SERDE_PARAMS +( + SERDE_ID bigint NOT NULL, + PARAM_KEY nvarchar(256) NOT NULL, + PARAM_VALUE nvarchar(4000) NULL +); + +ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY); + +-- Table PARTITION_PARAMS for join relationship +CREATE TABLE PARTITION_PARAMS +( + PART_ID bigint NOT NULL, + PARAM_KEY nvarchar(256) NOT NULL, + PARAM_VALUE nvarchar(4000) NULL +); + +ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY); + +-- Table TABLE_PARAMS for join relationship +CREATE TABLE TABLE_PARAMS +( + TBL_ID bigint NOT NULL, + PARAM_KEY nvarchar(256) NOT NULL, + PARAM_VALUE nvarchar(4000) NULL +); + +ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY); + +CREATE TABLE NOTIFICATION_LOG +( + NL_ID bigint NOT NULL, + EVENT_ID bigint NOT NULL, + EVENT_TIME int NOT NULL, + EVENT_TYPE nvarchar(32) NOT NULL, + DB_NAME nvarchar(128) NULL, + TBL_NAME nvarchar(128) NULL, + MESSAGE text NULL +); + +ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID); + +CREATE TABLE 
NOTIFICATION_SEQUENCE +( + NNI_ID bigint NOT NULL, + NEXT_EVENT_ID bigint NOT NULL +); + +ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID); + +-- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey] + +-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex] +ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) ; + +ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID); + +CREATE INDEX IDXS_N51 ON IDXS (SD_ID); + +CREATE INDEX IDXS_N50 ON IDXS (ORIG_TBL_ID); + +CREATE INDEX IDXS_N49 ON IDXS (INDEX_TBL_ID); + + +-- Constraints for table PART_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics] +ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ; + +CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID); + + +-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege] +ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ; + +CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID); + + +-- Constraints for table SKEWED_STRING_LIST for class(es) [org.apache.hadoop.hive.metastore.model.MStringList] + +-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole] +CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME); + + +-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition] +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID); + +CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID); + +CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID); + + +-- Constraints for table CDS for class(es) [org.apache.hadoop.hive.metastore.model.MColumnDescriptor] + +-- Constraints for table VERSION for class(es) [org.apache.hadoop.hive.metastore.model.MVersionTable] + +-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege] +CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege] +ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ; + +CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID); + +CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege] +ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ; + +CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS 
(DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID); + + +-- Constraints for table TAB_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics] +ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS (TBL_ID); + + +-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType] +CREATE UNIQUE INDEX UNIQUETYPE ON TYPES (TYPE_NAME); + + +-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege] +ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID); + +CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase] +CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME"); + + +-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege] +ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID); + + +-- Constraints for table DELEGATION_TOKENS for class(es) [org.apache.hadoop.hive.metastore.model.MDelegationToken] + +-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo] + +-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunction] +ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ; + +CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME,DB_ID); + +CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID); + + +-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap] +ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) ; + +CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID); + +CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable] +ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ; + +CREATE INDEX TBLS_N50 ON TBLS (SD_ID); + +CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID); + +CREATE INDEX TBLS_N49 ON TBLS (DB_ID); + + +-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor] +ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ; + +ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ; + +CREATE INDEX SDS_N50 ON SDS (CD_ID); + +CREATE INDEX SDS_N49 ON SDS (SERDE_ID); + + +-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent] +CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME); + + +-- Constraints for table SORT_COLS +ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +CREATE INDEX SORT_COLS_N49 
ON SORT_COLS (SD_ID); + + +-- Constraints for table SKEWED_COL_NAMES +ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +CREATE INDEX SKEWED_COL_NAMES_N49 ON SKEWED_COL_NAMES (SD_ID); + + +-- Constraints for table SKEWED_COL_VALUE_LOC_MAP +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ; + +CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N50 ON SKEWED_COL_VALUE_LOC_MAP (STRING_LIST_ID_KID); + +CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N49 ON SKEWED_COL_VALUE_LOC_MAP (SD_ID); + + +-- Constraints for table SKEWED_STRING_LIST_VALUES +ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ; + +CREATE INDEX SKEWED_STRING_LIST_VALUES_N49 ON SKEWED_STRING_LIST_VALUES (STRING_LIST_ID); + + +-- Constraints for table PARTITION_KEY_VALS +ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ; + +CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID); + + +-- Constraints for table PARTITION_KEYS +ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID); + + +-- Constraints for table SKEWED_VALUES +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) ; + +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ; + +CREATE INDEX SKEWED_VALUES_N50 ON SKEWED_VALUES (SD_ID_OID); + +CREATE INDEX SKEWED_VALUES_N49 ON SKEWED_VALUES (STRING_LIST_ID_EID); + + +-- Constraints for table SD_PARAMS +ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID); + + +-- Constraints for table FUNC_RU +ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) ; + +CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID); + + +-- Constraints for table TYPE_FIELDS +ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) ; + +CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME); + + +-- Constraints for table BUCKETING_COLS +ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID); + + +-- Constraints for table DATABASE_PARAMS +ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ; + +CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID); + + +-- Constraints for table INDEX_PARAMS +ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) ; + +CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID); + + +-- Constraints for table COLUMNS_V2 +ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ; + +CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID); + + +-- Constraints for table SERDE_PARAMS +ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES 
(SERDE_ID) ; + +CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID); + + +-- Constraints for table PARTITION_PARAMS +ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ; + +CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID); + + +-- Constraints for table TABLE_PARAMS +ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID); + + + +-- ----------------------------------------------------------------------------------------------------------------------------------------------- +-- Transaction and Lock Tables +-- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file. +-- ----------------------------------------------------------------------------------------------------------------------------------------------- +CREATE TABLE COMPACTION_QUEUE( + CQ_ID bigint NOT NULL, + CQ_DATABASE nvarchar(128) NOT NULL, + CQ_TABLE nvarchar(128) NOT NULL, + CQ_PARTITION nvarchar(767) NULL, + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_WORKER_ID nvarchar(128) NULL, + CQ_START bigint NULL, + CQ_RUN_AS nvarchar(128) NULL, +PRIMARY KEY CLUSTERED +( + CQ_ID ASC +) +); + +CREATE TABLE COMPLETED_TXN_COMPONENTS( + CTC_TXNID bigint NULL, + CTC_DATABASE nvarchar(128) NOT NULL, + CTC_TABLE nvarchar(128) NULL, + CTC_PARTITION nvarchar(767) NULL +); + +CREATE TABLE HIVE_LOCKS( + HL_LOCK_EXT_ID bigint NOT NULL, + HL_LOCK_INT_ID bigint NOT NULL, + HL_TXNID bigint NULL, + HL_DB nvarchar(128) NOT NULL, + HL_TABLE nvarchar(128) NULL, + HL_PARTITION nvarchar(767) NULL, + HL_LOCK_STATE char(1) NOT NULL, + HL_LOCK_TYPE char(1) NOT NULL, + HL_LAST_HEARTBEAT bigint NOT NULL, + HL_ACQUIRED_AT bigint NULL, + HL_USER nvarchar(128) NOT NULL, + HL_HOST nvarchar(128) NOT NULL, +PRIMARY KEY CLUSTERED +( + HL_LOCK_EXT_ID ASC, + HL_LOCK_INT_ID ASC +) +); + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID( + NCQ_NEXT bigint NOT NULL +); + +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + +CREATE TABLE NEXT_LOCK_ID( + NL_NEXT bigint NOT NULL +); + +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE NEXT_TXN_ID( + NTXN_NEXT bigint NOT NULL +); + +INSERT INTO NEXT_TXN_ID VALUES(1); + +CREATE TABLE TXNS( + TXN_ID bigint NOT NULL, + TXN_STATE char(1) NOT NULL, + TXN_STARTED bigint NOT NULL, + TXN_LAST_HEARTBEAT bigint NOT NULL, + TXN_USER nvarchar(128) NOT NULL, + TXN_HOST nvarchar(128) NOT NULL, +PRIMARY KEY CLUSTERED +( + TXN_ID ASC +) +); + +CREATE TABLE TXN_COMPONENTS( + TC_TXNID bigint NULL, + TC_DATABASE nvarchar(128) NOT NULL, + TC_TABLE nvarchar(128) NULL, + TC_PARTITION nvarchar(767) NULL +); + +ALTER TABLE TXN_COMPONENTS WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID); + + + +-- ----------------------------------------------------------------- +-- Record schema version. 
Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '1.2.0', 'Hive release version 1.2.0'); diff --git metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql similarity index 99% rename from metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql rename to standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql index 448086eecc..51bd8f3f30 100644 --- metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql +++ standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql @@ -634,7 +634,7 @@ CREATE TABLE WM_POOL POOL_ID bigint NOT NULL, RP_ID bigint NOT NULL, PATH nvarchar(1024) NOT NULL, - ALLOC_FRACTION DOUBLE, + ALLOC_FRACTION float, QUERY_PARALLELISM int, SCHEDULING_POLICY nvarchar(1024) ); @@ -942,8 +942,6 @@ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFA ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); -ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID); - CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME"); ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); @@ -956,7 +954,6 @@ CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); - -- ----------------------------------------------------------------------------------------------------------------------------------------------- -- Transaction and Lock Tables -- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file. 
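The ALLOC_FRACTION hunk above is a T-SQL portability fix: SQL Server has no bare DOUBLE type (only the ANSI spelling DOUBLE PRECISION), and its float defaults to float(53), an 8-byte double. A minimal illustrative sketch, using a hypothetical scratch table ALLOC_DEMO that is not part of this patch:

-- CREATE TABLE ALLOC_DEMO (ALLOC_FRACTION DOUBLE);   -- rejected: bare DOUBLE is not a T-SQL type
CREATE TABLE ALLOC_DEMO (ALLOC_FRACTION float);       -- accepted: float = float(53), i.e. double precision
INSERT INTO ALLOC_DEMO VALUES (0.25);
SELECT ALLOC_FRACTION FROM ALLOC_DEMO;
DROP TABLE ALLOC_DEMO;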
@@ -1006,9 +1003,14 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS( CTC_TXNID bigint NULL, CTC_DATABASE nvarchar(128) NOT NULL, CTC_TABLE nvarchar(128) NULL, - CTC_PARTITION nvarchar(767) NULL + CTC_PARTITION nvarchar(767) NULL, + CTC_ID bigint IDENTITY (1, 1), + CTC_TIMESTAMP datetime2 DEFAULT CURRENT_TIMESTAMP NOT NULL ); +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_ID); +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX2 ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + CREATE TABLE HIVE_LOCKS( HL_LOCK_EXT_ID bigint NOT NULL, HL_LOCK_INT_ID bigint NOT NULL, diff --git standalone-metastore/src/main/sql/mssql/upgrade-1.2.0-to-2.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/upgrade-1.2.0-to-2.0.0.mssql.sql new file mode 100644 index 0000000000..3d4d713d50 --- /dev/null +++ standalone-metastore/src/main/sql/mssql/upgrade-1.2.0-to-2.0.0.mssql.sql @@ -0,0 +1,73 @@ +SELECT 'Upgrading MetaStore schema from 1.2.0 to 2.0.0' AS MESSAGE; + +--:r 007-HIVE-11970.mssql.sql +ALTER TABLE "COLUMNS_V2" ALTER COLUMN "COLUMN_NAME" nvarchar(1000) NOT NULL; +ALTER TABLE "PART_COL_PRIVS" ALTER COLUMN "COLUMN_NAME" nvarchar(1000) NULL; +ALTER TABLE "TBL_COL_PRIVS" ALTER COLUMN "COLUMN_NAME" nvarchar(1000) NULL; +ALTER TABLE "SORT_COLS" ALTER COLUMN "COLUMN_NAME" nvarchar(1000) NULL; +ALTER TABLE "TAB_COL_STATS" ALTER COLUMN "COLUMN_NAME" nvarchar(1000) NOT NULL; +ALTER TABLE "PART_COL_STATS" ALTER COLUMN "COLUMN_NAME" nvarchar(1000) NOT NULL; + +--:r 008-HIVE-12807.mssql.sql +ALTER TABLE COMPACTION_QUEUE ADD CQ_HIGHEST_TXN_ID bigint NULL; + +--:r 009-HIVE-12814.mssql.sql +ALTER TABLE COMPACTION_QUEUE ADD CQ_META_INFO varbinary(2048) NULL; + +--:r 010-HIVE-12816.mssql.sql +ALTER TABLE COMPACTION_QUEUE ADD CQ_HADOOP_JOB_ID nvarchar(32) NULL; + +--:r 011-HIVE-12818.mssql.sql +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID bigint NOT NULL, + CC_DATABASE nvarchar(128) NOT NULL, + CC_TABLE nvarchar(128) NOT NULL, + CC_PARTITION nvarchar(767) NULL, + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_WORKER_ID nvarchar(128) NULL, + CC_START bigint NULL, + CC_END bigint NULL, + CC_RUN_AS nvarchar(128) NULL, + CC_HIGHEST_TXN_ID bigint NULL, + CC_META_INFO varbinary(2048) NULL, + CC_HADOOP_JOB_ID nvarchar(128) NULL, +PRIMARY KEY CLUSTERED +( + CC_ID ASC +) +); + + + +--:r 012-HIVE-12819.mssql.sql +ALTER TABLE TXNS ADD TXN_AGENT_INFO nvarchar(128) NULL; + +--:r 013-HIVE-12821.mssql.sql +ALTER TABLE TXNS ADD TXN_HEARTBEAT_COUNT int NULL; +ALTER TABLE HIVE_LOCKS ADD HL_HEARTBEAT_COUNT int NULL; + +--:r 014-HIVE-12822.mssql.sql +ALTER TABLE TXNS ADD TXN_META_INFO nvarchar(128) NULL; + +--:r 015-HIVE-12823.mssql.sql +ALTER TABLE HIVE_LOCKS ADD HL_AGENT_INFO nvarchar(128) NULL; + +--:r 016-HIVE-12831.mssql.sql +ALTER TABLE HIVE_LOCKS ADD HL_BLOCKEDBY_EXT_ID bigint NULL; +ALTER TABLE HIVE_LOCKS ADD HL_BLOCKEDBY_INT_ID bigint NULL; + +--:r 017-HIVE-12832.mssql.sql +CREATE TABLE AUX_TABLE ( + MT_KEY1 nvarchar(128) NOT NULL, + MT_KEY2 bigint NOT NULL, + MT_COMMENT nvarchar(255) NULL, + PRIMARY KEY CLUSTERED +( + MT_KEY1 ASC, + MT_KEY2 ASC +) +); + +UPDATE VERSION SET SCHEMA_VERSION='2.0.0', VERSION_COMMENT='Hive release version 2.0.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 1.2.0 to 2.0.0' AS MESSAGE; diff --git standalone-metastore/src/main/sql/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql standalone-metastore/src/main/sql/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql new file mode 100644 index 0000000000..623b352359 --- /dev/null +++ 
standalone-metastore/src/main/sql/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql @@ -0,0 +1,39 @@ +SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0' AS MESSAGE; + +--:r 019-HIVE-13076.mssql.sql +CREATE TABLE KEY_CONSTRAINTS +( + CHILD_CD_ID BIGINT, + CHILD_INTEGER_IDX INT, + CHILD_TBL_ID BIGINT, + PARENT_CD_ID BIGINT NOT NULL, + PARENT_INTEGER_IDX INT NOT NULL, + PARENT_TBL_ID BIGINT NOT NULL, + POSITION INT NOT NULL, + CONSTRAINT_NAME VARCHAR(400) NOT NULL, + CONSTRAINT_TYPE SMALLINT NOT NULL, + UPDATE_RULE SMALLINT, + DELETE_RULE SMALLINT, + ENABLE_VALIDATE_RELY SMALLINT NOT NULL +) ; +ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION); +CREATE INDEX CONSTRAINTS_PARENT_TBL_ID__INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID); + +--:r 020-HIVE-13395.mssql.sql +CREATE TABLE WRITE_SET ( + WS_DATABASE nvarchar(128) NOT NULL, + WS_TABLE nvarchar(128) NOT NULL, + WS_PARTITION nvarchar(767), + WS_TXNID bigint NOT NULL, + WS_COMMIT_ID bigint NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +); +ALTER TABLE TXN_COMPONENTS ADD TC_OPERATION_TYPE char(1) NULL; + +--:r 021-HIVE-13354.mssql.sql +ALTER TABLE COMPACTION_QUEUE ADD CQ_TBLPROPERTIES nvarchar(2048) NULL; +ALTER TABLE COMPLETED_COMPACTIONS ADD CC_TBLPROPERTIES nvarchar(2048) NULL; + + +UPDATE VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0' AS MESSAGE; diff --git standalone-metastore/src/main/sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql standalone-metastore/src/main/sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql new file mode 100644 index 0000000000..64d8fca572 --- /dev/null +++ standalone-metastore/src/main/sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql @@ -0,0 +1,43 @@ +SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE; + +--:r 022-HIVE-14496.mssql.sql +ALTER TABLE TBLS ADD IS_REWRITE_ENABLED bit NOT NULL DEFAULT(0); + +--:r 023-HIVE-10562.mssql.sql +ALTER TABLE NOTIFICATION_LOG ADD MESSAGE_FORMAT nvarchar(16); + +--:r 024-HIVE-12274.mssql.sql +ALTER TABLE "SERDE_PARAMS" ALTER COLUMN "PARAM_VALUE" nvarchar(MAX); +ALTER TABLE "TABLE_PARAMS" ALTER COLUMN "PARAM_VALUE" nvarchar(MAX); +ALTER TABLE "SD_PARAMS" ALTER COLUMN "PARAM_VALUE" nvarchar(MAX); + +ALTER TABLE "TBLS" ALTER COLUMN "TBL_NAME" nvarchar(256); +ALTER TABLE "NOTIFICATION_LOG" ALTER COLUMN "TBL_NAME" nvarchar(256); +ALTER TABLE "PARTITION_EVENTS" ALTER COLUMN "TBL_NAME" nvarchar(256); +ALTER TABLE "TAB_COL_STATS" ALTER COLUMN "TABLE_NAME" nvarchar(256); +ALTER TABLE "PART_COL_STATS" ALTER COLUMN "TABLE_NAME" nvarchar(256); +ALTER TABLE "COMPLETED_TXN_COMPONENTS" ALTER COLUMN "CTC_TABLE" nvarchar(256); + + +-- A number of indices and constraints reference COLUMN_NAME. These have to be dropped before the not null constraint +-- can be added. 
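One way to double-check which indexes still reference COLUMN_NAME before running the drops that follow is to query SQL Server's catalog views; this is an illustrative helper query under that assumption, not part of the upgrade script:

SELECT OBJECT_NAME(ic.object_id) AS table_name,
       i.name AS index_name,
       i.is_primary_key
FROM sys.index_columns ic
JOIN sys.indexes i ON i.object_id = ic.object_id AND i.index_id = ic.index_id
JOIN sys.columns c ON c.object_id = ic.object_id AND c.column_id = ic.column_id
WHERE c.name = 'COLUMN_NAME';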
+ALTER TABLE COLUMNS_V2 DROP CONSTRAINT COLUMNS_PK; +DROP INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS; +DROP INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS; +DROP INDEX PCS_STATS_IDX ON PART_COL_STATS; + +ALTER TABLE "COLUMNS_V2" ALTER COLUMN "COLUMN_NAME" nvarchar(767) NOT NULL; +ALTER TABLE "PART_COL_PRIVS" ALTER COLUMN "COLUMN_NAME" nvarchar(767) NULL; +ALTER TABLE "TBL_COL_PRIVS" ALTER COLUMN "COLUMN_NAME" nvarchar(767) NULL; +ALTER TABLE "SORT_COLS" ALTER COLUMN "COLUMN_NAME" nvarchar(767) NULL; +ALTER TABLE "TAB_COL_STATS" ALTER COLUMN "COLUMN_NAME" nvarchar(767) NOT NULL; +ALTER TABLE "PART_COL_STATS" ALTER COLUMN "COLUMN_NAME" nvarchar(767) NOT NULL; + +-- Put back the indices and constraints we dropped. +ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_PK PRIMARY KEY (CD_ID,"COLUMN_NAME"); +CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE); +CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE); +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); + +UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE; diff --git standalone-metastore/src/main/sql/mssql/upgrade-2.2.0-to-2.3.0.mssql.sql standalone-metastore/src/main/sql/mssql/upgrade-2.2.0-to-2.3.0.mssql.sql new file mode 100644 index 0000000000..feb79a061e --- /dev/null +++ standalone-metastore/src/main/sql/mssql/upgrade-2.2.0-to-2.3.0.mssql.sql @@ -0,0 +1,7 @@ +SELECT 'Upgrading MetaStore schema from 2.2.0 to 2.3.0' AS MESSAGE; + +--:r 025-HIVE-16399.mssql.sql +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); + +UPDATE VERSION SET SCHEMA_VERSION='2.3.0', VERSION_COMMENT='Hive release version 2.3.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 2.2.0 to 2.3.0' AS MESSAGE; diff --git standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql new file mode 100644 index 0000000000..6dc3e1a091 --- /dev/null +++ standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql @@ -0,0 +1,150 @@ +SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS MESSAGE; + +-- :r 026-HIVE-16556.mssql.sql +CREATE TABLE METASTORE_DB_PROPERTIES ( + PROPERTY_KEY VARCHAR(255) NOT NULL, + PROPERTY_VALUE VARCHAR(1000) NOT NULL, + DESCRIPTION VARCHAR(1000) +); + +ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY); + +--:r 027-HIVE-16575.mssql.sql +CREATE INDEX CONSTRAINTS_CONSTRAINT_TYPE_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE); + +--:r 028-HIVE-16922.mssql.sql +UPDATE SERDE_PARAMS +SET PARAM_KEY='collection.delim' +WHERE PARAM_KEY='colelction.delim'; + +--:r 029-HIVE-16997.mssql.sql +ALTER TABLE PART_COL_STATS ADD BIT_VECTOR VARBINARY(MAX); +ALTER TABLE TAB_COL_STATS ADD BIT_VECTOR VARBINARY(MAX); + +--:r 030-HIVE-16886.mssql.sql +INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 WHERE NOT EXISTS (SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE); + +--:r 031-HIVE-17566.mssql.sql +CREATE TABLE WM_RESOURCEPLAN +( + RP_ID bigint NOT NULL, + "NAME" nvarchar(128) NOT NULL, + QUERY_PARALLELISM int, + STATUS nvarchar(20) NOT NULL, + DEFAULT_POOL_ID bigint +); + +ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT 
WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME"); + + +CREATE TABLE WM_POOL +( + POOL_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + PATH nvarchar(1024) NOT NULL, + ALLOC_FRACTION float, + QUERY_PARALLELISM int, + SCHEDULING_POLICY nvarchar(1024) +); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH); +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + + +CREATE TABLE WM_TRIGGER +( + TRIGGER_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + "NAME" nvarchar(128) NOT NULL, + TRIGGER_EXPRESSION nvarchar(1024), + ACTION_EXPRESSION nvarchar(1024), + IS_IN_UNMANAGED bit NOT NULL DEFAULT 0 +); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME"); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + + +CREATE TABLE WM_POOL_TO_TRIGGER +( + POOL_ID bigint NOT NULL, + TRIGGER_ID bigint NOT NULL +); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID); + + +CREATE TABLE WM_MAPPING +( + MAPPING_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + ENTITY_TYPE nvarchar(128) NOT NULL, + ENTITY_NAME nvarchar(128) NOT NULL, + POOL_ID bigint, + ORDERING int +); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); + +--:r 033-HIVE-14498.mssql.sql +CREATE TABLE MV_CREATION_METADATA +( + MV_CREATION_METADATA_ID bigint NOT NULL, + DB_NAME nvarchar(128) NOT NULL, + TBL_NAME nvarchar(256) NOT NULL, + TXN_LIST text NULL +); + +CREATE TABLE MV_TABLES_USED +( + MV_CREATION_METADATA_ID bigint NOT NULL, + TBL_ID bigint NOT NULL +); + +ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID); + +CREATE INDEX MV_UNIQUE_TABLE ON MV_CREATION_METADATA (TBL_NAME,DB_NAME); + +ALTER TABLE MV_TABLES_USED ADD FOREIGN KEY(MV_CREATION_METADATA_ID) REFERENCES MV_CREATION_METADATA (MV_CREATION_METADATA_ID); + +ALTER TABLE MV_TABLES_USED ADD FOREIGN KEY(TBL_ID) REFERENCES TBLS (TBL_ID); + +ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_TIMESTAMP datetime2 NOT NULL DEFAULT(CURRENT_TIMESTAMP); +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +--:r 034-HIVE-18489.mssql.sql +UPDATE FUNC_RU + SET RESOURCE_URI = 's3a' + SUBSTRING(RESOURCE_URI, 4, LEN(RESOURCE_URI)) + WHERE RESOURCE_URI LIKE 's3n://%' ; + +UPDATE SKEWED_COL_VALUE_LOC_MAP + SET LOCATION = 's3a' + SUBSTRING(LOCATION, 4, LEN(LOCATION)) + WHERE LOCATION LIKE 's3n://%' ; + +UPDATE SDS + SET LOCATION = 's3a' + SUBSTRING(LOCATION, 4, LEN(LOCATION)) + WHERE LOCATION LIKE 's3n://%' ; + +UPDATE DBS + SET DB_LOCATION_URI = 's3a' + SUBSTRING(DB_LOCATION_URI, 4, LEN(DB_LOCATION_URI)) + WHERE DB_LOCATION_URI LIKE 's3n://%' ; + +UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS MESSAGE;
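The HIVE-18489 statements above migrate s3n:// URIs to s3a:// by plain string surgery: SUBSTRING(uri, 4, LEN(uri)) keeps everything from the fourth character onward (the '://...' remainder) and 's3a' + ... prepends the new scheme. An illustrative one-liner on a hypothetical sample value, not part of the script:

SELECT 's3a' + SUBSTRING('s3n://bucket/warehouse/t1', 4, LEN('s3n://bucket/warehouse/t1'));
-- returns 's3a://bucket/warehouse/t1'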
diff --git standalone-metastore/src/main/sql/mssql/upgrade.order.mssql standalone-metastore/src/main/sql/mssql/upgrade.order.mssql new file mode 100644 index 0000000000..8623683505 --- /dev/null +++ standalone-metastore/src/main/sql/mssql/upgrade.order.mssql @@ -0,0 +1,10 @@ +0.11.0-to-0.12.0 +0.12.0-to-0.13.0 +0.13.0-to-0.14.0 +0.14.0-to-1.1.0 +1.1.0-to-1.2.0 +1.2.0-to-2.0.0 +2.0.0-to-2.1.0 +2.1.0-to-2.2.0 +2.2.0-to-2.3.0 +2.3.0-to-3.0.0 diff --git standalone-metastore/src/main/sql/mysql/create-user.mysql.sql standalone-metastore/src/main/sql/mysql/create-user.mysql.sql new file mode 100644 index 0000000000..811f6f5e99 --- /dev/null +++ standalone-metastore/src/main/sql/mysql/create-user.mysql.sql @@ -0,0 +1,8 @@ +CREATE DATABASE _REPLACE_WITH_DB_; +CREATE USER '_REPLACE_WITH_USER_'@'localhost' IDENTIFIED BY '_REPLACE_WITH_PASSWD_'; +CREATE USER '_REPLACE_WITH_USER_'@'%' IDENTIFIED BY '_REPLACE_WITH_PASSWD_'; +REVOKE ALL PRIVILEGES, GRANT OPTION FROM '_REPLACE_WITH_USER_'@'localhost'; +REVOKE ALL PRIVILEGES, GRANT OPTION FROM '_REPLACE_WITH_USER_'@'%'; +GRANT ALL PRIVILEGES ON _REPLACE_WITH_DB_.* TO '_REPLACE_WITH_USER_'@'localhost'; +GRANT ALL PRIVILEGES ON _REPLACE_WITH_DB_.* TO '_REPLACE_WITH_USER_'@'%'; +FLUSH PRIVILEGES; diff --git standalone-metastore/src/main/sql/mysql/hive-schema-1.2.0.mysql.sql standalone-metastore/src/main/sql/mysql/hive-schema-1.2.0.mysql.sql new file mode 100644 index 0000000000..adf0de72fd --- /dev/null +++ standalone-metastore/src/main/sql/mysql/hive-schema-1.2.0.mysql.sql @@ -0,0 +1,910 @@ +-- MySQL dump 10.13 Distrib 5.5.25, for osx10.6 (i386) +-- +-- Host: localhost Database: test +-- ------------------------------------------------------ +-- Server version 5.5.25 + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; + +-- +-- Table structure for table `BUCKETING_COLS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` ( + `SD_ID` bigint(20) NOT NULL, + `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`SD_ID`,`INTEGER_IDX`), + KEY `BUCKETING_COLS_N49` (`SD_ID`), + CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `CDS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `CDS` ( + `CD_ID` bigint(20) NOT NULL, + PRIMARY KEY (`CD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table
`COLUMNS_V2` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `COLUMNS_V2` ( + `CD_ID` bigint(20) NOT NULL, + `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TYPE_NAME` varchar(4000) DEFAULT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`CD_ID`,`COLUMN_NAME`), + KEY `COLUMNS_V2_N49` (`CD_ID`), + CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `DATABASE_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` ( + `DB_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`DB_ID`,`PARAM_KEY`), + KEY `DATABASE_PARAMS_N49` (`DB_ID`), + CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `DBS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `DBS` ( + `DB_ID` bigint(20) NOT NULL, + `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`DB_ID`), + UNIQUE KEY `UNIQUE_DATABASE` (`NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `DB_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `DB_PRIVS` ( + `DB_GRANT_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `DB_ID` bigint(20) DEFAULT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`DB_GRANT_ID`), + UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + KEY `DB_PRIVS_N49` (`DB_ID`), + CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `GLOBAL_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` ( + `USER_GRANT_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) 
NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`USER_GRANT_ID`), + UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `IDXS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `IDXS` ( + `INDEX_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `DEFERRED_REBUILD` bit(1) NOT NULL, + `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INDEX_TBL_ID` bigint(20) DEFAULT NULL, + `LAST_ACCESS_TIME` int(11) NOT NULL, + `ORIG_TBL_ID` bigint(20) DEFAULT NULL, + `SD_ID` bigint(20) DEFAULT NULL, + PRIMARY KEY (`INDEX_ID`), + UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`), + KEY `IDXS_N51` (`SD_ID`), + KEY `IDXS_N50` (`INDEX_TBL_ID`), + KEY `IDXS_N49` (`ORIG_TBL_ID`), + CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`), + CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`), + CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `INDEX_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` ( + `INDEX_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`), + KEY `INDEX_PARAMS_N49` (`INDEX_ID`), + CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `NUCLEUS_TABLES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` ( + `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`CLASS_NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PARTITIONS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PARTITIONS` ( + `PART_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + 
`LAST_ACCESS_TIME` int(11) NOT NULL, + `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `SD_ID` bigint(20) DEFAULT NULL, + `TBL_ID` bigint(20) DEFAULT NULL, + `LINK_TARGET_ID` bigint(20) DEFAULT NULL, + PRIMARY KEY (`PART_ID`), + UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`), + KEY `PARTITIONS_N49` (`TBL_ID`), + KEY `PARTITIONS_N50` (`SD_ID`), + KEY `PARTITIONS_N51` (`LINK_TARGET_ID`), + CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`), + CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`), + CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PARTITION_EVENTS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` ( + `PART_NAME_ID` bigint(20) NOT NULL, + `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `EVENT_TIME` bigint(20) NOT NULL, + `EVENT_TYPE` int(11) NOT NULL, + `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`PART_NAME_ID`), + KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PARTITION_KEYS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` ( + `TBL_ID` bigint(20) NOT NULL, + `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`TBL_ID`,`PKEY_NAME`), + KEY `PARTITION_KEYS_N49` (`TBL_ID`), + CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PARTITION_KEY_VALS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` ( + `PART_ID` bigint(20) NOT NULL, + `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`PART_ID`,`INTEGER_IDX`), + KEY `PARTITION_KEY_VALS_N49` (`PART_ID`), + CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PARTITION_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` ( + `PART_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`PART_ID`,`PARAM_KEY`), + KEY `PARTITION_PARAMS_N49` (`PART_ID`), + CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) 
ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PART_COL_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` ( + `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL, + `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `CREATE_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PART_ID` bigint(20) DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`PART_COLUMN_GRANT_ID`), + KEY `PART_COL_PRIVS_N49` (`PART_ID`), + KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PART_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PART_PRIVS` ( + `PART_GRANT_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PART_ID` bigint(20) DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`PART_GRANT_ID`), + KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + KEY `PART_PRIVS_N49` (`PART_ID`), + CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `ROLES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `ROLES` ( + `ROLE_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`ROLE_ID`), + UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `ROLE_MAP` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `ROLE_MAP` ( + `ROLE_GRANT_ID` bigint(20) NOT NULL, + `ADD_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 
COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `ROLE_ID` bigint(20) DEFAULT NULL, + PRIMARY KEY (`ROLE_GRANT_ID`), + UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`), + KEY `ROLE_MAP_N49` (`ROLE_ID`), + CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SDS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SDS` ( + `SD_ID` bigint(20) NOT NULL, + `CD_ID` bigint(20) DEFAULT NULL, + `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `IS_COMPRESSED` bit(1) NOT NULL, + `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL, + `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `NUM_BUCKETS` int(11) NOT NULL, + `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `SERDE_ID` bigint(20) DEFAULT NULL, + PRIMARY KEY (`SD_ID`), + KEY `SDS_N49` (`SERDE_ID`), + KEY `SDS_N50` (`CD_ID`), + CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`), + CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SD_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SD_PARAMS` ( + `SD_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`SD_ID`,`PARAM_KEY`), + KEY `SD_PARAMS_N49` (`SD_ID`), + CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SEQUENCE_TABLE` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` ( + `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `NEXT_VAL` bigint(20) NOT NULL, + PRIMARY KEY (`SEQUENCE_NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SERDES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SERDES` ( + `SERDE_ID` bigint(20) NOT NULL, + `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`SERDE_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SERDE_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` ( + `SERDE_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 
COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`), + KEY `SERDE_PARAMS_N49` (`SERDE_ID`), + CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SKEWED_COL_NAMES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` ( + `SD_ID` bigint(20) NOT NULL, + `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`SD_ID`,`INTEGER_IDX`), + KEY `SKEWED_COL_NAMES_N49` (`SD_ID`), + CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` ( + `SD_ID` bigint(20) NOT NULL, + `STRING_LIST_ID_KID` bigint(20) NOT NULL, + `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`), + KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`), + KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`), + CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`), + CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SKEWED_STRING_LIST` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` ( + `STRING_LIST_ID` bigint(20) NOT NULL, + PRIMARY KEY (`STRING_LIST_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SKEWED_STRING_LIST_VALUES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` ( + `STRING_LIST_ID` bigint(20) NOT NULL, + `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`), + KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`), + CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SKEWED_VALUES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` ( + `SD_ID_OID` bigint(20) NOT NULL, + `STRING_LIST_ID_EID` bigint(20) NOT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`), + KEY `SKEWED_VALUES_N50` (`SD_ID_OID`), + KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`), + CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`), + CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) 
REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SORT_COLS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SORT_COLS` ( + `SD_ID` bigint(20) NOT NULL, + `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `ORDER` int(11) NOT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`SD_ID`,`INTEGER_IDX`), + KEY `SORT_COLS_N49` (`SD_ID`), + CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TABLE_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` ( + `TBL_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`TBL_ID`,`PARAM_KEY`), + KEY `TABLE_PARAMS_N49` (`TBL_ID`), + CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TBLS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TBLS` ( + `TBL_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `DB_ID` bigint(20) DEFAULT NULL, + `LAST_ACCESS_TIME` int(11) NOT NULL, + `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `RETENTION` int(11) NOT NULL, + `SD_ID` bigint(20) DEFAULT NULL, + `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `VIEW_EXPANDED_TEXT` mediumtext, + `VIEW_ORIGINAL_TEXT` mediumtext, + `LINK_TARGET_ID` bigint(20) DEFAULT NULL, + PRIMARY KEY (`TBL_ID`), + UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`), + KEY `TBLS_N50` (`SD_ID`), + KEY `TBLS_N49` (`DB_ID`), + KEY `TBLS_N51` (`LINK_TARGET_ID`), + CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`), + CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`), + CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TBL_COL_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` ( + `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL, + `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `CREATE_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_ID` bigint(20) DEFAULT NULL, + PRIMARY KEY 
(`TBL_COLUMN_GRANT_ID`), + KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + KEY `TBL_COL_PRIVS_N49` (`TBL_ID`), + CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TBL_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TBL_PRIVS` ( + `TBL_GRANT_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_ID` bigint(20) DEFAULT NULL, + PRIMARY KEY (`TBL_GRANT_ID`), + KEY `TBL_PRIVS_N49` (`TBL_ID`), + KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TAB_COL_STATS` +-- +CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` ( + `CS_ID` bigint(20) NOT NULL, + `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TBL_ID` bigint(20) NOT NULL, + `LONG_LOW_VALUE` bigint(20), + `LONG_HIGH_VALUE` bigint(20), + `DOUBLE_HIGH_VALUE` double(53,4), + `DOUBLE_LOW_VALUE` double(53,4), + `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `NUM_NULLS` bigint(20) NOT NULL, + `NUM_DISTINCTS` bigint(20), + `AVG_COL_LEN` double(53,4), + `MAX_COL_LEN` bigint(20), + `NUM_TRUES` bigint(20), + `NUM_FALSES` bigint(20), + `LAST_ANALYZED` bigint(20) NOT NULL, + PRIMARY KEY (`CS_ID`), + CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- +-- Table structure for table `PART_COL_STATS` +-- +CREATE TABLE IF NOT EXISTS `PART_COL_STATS` ( + `CS_ID` bigint(20) NOT NULL, + `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PART_ID` bigint(20) NOT NULL, + `LONG_LOW_VALUE` bigint(20), + `LONG_HIGH_VALUE` bigint(20), + `DOUBLE_HIGH_VALUE` double(53,4), + `DOUBLE_LOW_VALUE` double(53,4), + `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `NUM_NULLS` bigint(20) NOT NULL, + `NUM_DISTINCTS` 
bigint(20), + `AVG_COL_LEN` double(53,4), + `MAX_COL_LEN` bigint(20), + `NUM_TRUES` bigint(20), + `NUM_FALSES` bigint(20), + `LAST_ANALYZED` bigint(20) NOT NULL, + PRIMARY KEY (`CS_ID`), + CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE; + +-- +-- Table structure for table `TYPES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TYPES` ( + `TYPES_ID` bigint(20) NOT NULL, + `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`TYPES_ID`), + UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TYPE_FIELDS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` ( + `TYPE_NAME` bigint(20) NOT NULL, + `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`), + KEY `TYPE_FIELDS_N49` (`TYPE_NAME`), + CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey] +CREATE TABLE IF NOT EXISTS `MASTER_KEYS` +( + `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT, + `MASTER_KEY` VARCHAR(767) BINARY NULL, + PRIMARY KEY (`KEY_ID`) +) ENGINE=INNODB DEFAULT CHARSET=latin1; + +-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken] +CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS` +( + `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL, + `TOKEN` VARCHAR(767) BINARY NULL, + PRIMARY KEY (`TOKEN_IDENT`) +) ENGINE=INNODB DEFAULT CHARSET=latin1; + +-- +-- Table structure for VERSION +-- +CREATE TABLE IF NOT EXISTS `VERSION` ( + `VER_ID` BIGINT NOT NULL, + `SCHEMA_VERSION` VARCHAR(127) NOT NULL, + `VERSION_COMMENT` VARCHAR(255), + PRIMARY KEY (`VER_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- +-- Table structure for table FUNCS +-- +CREATE TABLE IF NOT EXISTS `FUNCS` ( + `FUNC_ID` BIGINT(20) NOT NULL, + `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `CREATE_TIME` INT(11) NOT NULL, + `DB_ID` BIGINT(20), + `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin, + `FUNC_TYPE` INT(11) NOT NULL, + `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin, + `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin, + PRIMARY KEY (`FUNC_ID`), + UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`), + KEY `FUNCS_N49` (`DB_ID`), + CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- +-- Table structure for table FUNC_RU +-- +CREATE TABLE IF NOT EXISTS `FUNC_RU` ( + `FUNC_ID` BIGINT(20) NOT NULL, + `RESOURCE_TYPE` INT(11) NOT NULL, + `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE 
latin1_bin, + `INTEGER_IDX` INT(11) NOT NULL, + PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`), + CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE IF NOT EXISTS `NOTIFICATION_LOG` +( + `NL_ID` BIGINT(20) NOT NULL, + `EVENT_ID` BIGINT(20) NOT NULL, + `EVENT_TIME` INT(11) NOT NULL, + `EVENT_TYPE` varchar(32) NOT NULL, + `DB_NAME` varchar(128), + `TBL_NAME` varchar(128), + `MESSAGE` mediumtext, + PRIMARY KEY (`NL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE IF NOT EXISTS `NOTIFICATION_SEQUENCE` +( + `NNI_ID` BIGINT(20) NOT NULL, + `NEXT_EVENT_ID` BIGINT(20) NOT NULL, + PRIMARY KEY (`NNI_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + + + +-- ---------------------------- +-- Transaction and Lock Tables +-- ---------------------------- +CREATE TABLE TXNS ( + TXN_ID bigint PRIMARY KEY, + TXN_STATE char(1) NOT NULL, + TXN_STARTED bigint NOT NULL, + TXN_LAST_HEARTBEAT bigint NOT NULL, + TXN_USER varchar(128) NOT NULL, + TXN_HOST varchar(128) NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE TXN_COMPONENTS ( + TC_TXNID bigint, + TC_DATABASE varchar(128) NOT NULL, + TC_TABLE varchar(128), + TC_PARTITION varchar(767), + FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE COMPLETED_TXN_COMPONENTS ( + CTC_TXNID bigint, + CTC_DATABASE varchar(128) NOT NULL, + CTC_TABLE varchar(128), + CTC_PARTITION varchar(767) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE NEXT_TXN_ID ( + NTXN_NEXT bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO NEXT_TXN_ID VALUES(1); + +CREATE TABLE HIVE_LOCKS ( + HL_LOCK_EXT_ID bigint NOT NULL, + HL_LOCK_INT_ID bigint NOT NULL, + HL_TXNID bigint, + HL_DB varchar(128) NOT NULL, + HL_TABLE varchar(128), + HL_PARTITION varchar(767), + HL_LOCK_STATE char(1) not null, + HL_LOCK_TYPE char(1) not null, + HL_LAST_HEARTBEAT bigint NOT NULL, + HL_ACQUIRED_AT bigint, + HL_USER varchar(128) NOT NULL, + HL_HOST varchar(128) NOT NULL, + PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID), + KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID); + +CREATE TABLE NEXT_LOCK_ID ( + NL_NEXT bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE COMPACTION_QUEUE ( + CQ_ID bigint PRIMARY KEY, + CQ_DATABASE varchar(128) NOT NULL, + CQ_TABLE varchar(128) NOT NULL, + CQ_PARTITION varchar(767), + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_WORKER_ID varchar(128), + CQ_START bigint, + CQ_RUN_AS varchar(128) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( + NCQ_NEXT bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + + + +-- ----------------------------------------------------------------- +-- Record schema version. 
Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '1.2.0', 'Hive release version 1.2.0'); + +/*!40101 SET character_set_client = @saved_cs_client */; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +-- Dump completed on 2012-08-23 0:56:31 diff --git metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql similarity index 91% rename from metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql rename to standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql index eb5da4a047..0eb2e2e4eb 100644 --- metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql +++ standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql @@ -944,8 +944,125 @@ CREATE TABLE IF NOT EXISTS WM_MAPPING -- ---------------------------- -- Transaction and Lock Tables -- ---------------------------- -SOURCE hive-txn-schema-3.0.0.mysql.sql; +CREATE TABLE TXNS ( + TXN_ID bigint PRIMARY KEY, + TXN_STATE char(1) NOT NULL, + TXN_STARTED bigint NOT NULL, + TXN_LAST_HEARTBEAT bigint NOT NULL, + TXN_USER varchar(128) NOT NULL, + TXN_HOST varchar(128) NOT NULL, + TXN_AGENT_INFO varchar(128), + TXN_META_INFO varchar(128), + TXN_HEARTBEAT_COUNT int +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE TXN_COMPONENTS ( + TC_TXNID bigint NOT NULL, + TC_DATABASE varchar(128) NOT NULL, + TC_TABLE varchar(128) NOT NULL, + TC_PARTITION varchar(767), + TC_OPERATION_TYPE char(1) NOT NULL, + FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); + +CREATE TABLE COMPLETED_TXN_COMPONENTS ( + CTC_TXNID bigint NOT NULL, + CTC_DATABASE varchar(128) NOT NULL, + CTC_TABLE varchar(256), + CTC_PARTITION varchar(767), + CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX2 ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE; + +CREATE TABLE NEXT_TXN_ID ( + NTXN_NEXT bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO NEXT_TXN_ID VALUES(1); + +CREATE TABLE HIVE_LOCKS ( + HL_LOCK_EXT_ID bigint NOT NULL, + HL_LOCK_INT_ID bigint NOT NULL, + HL_TXNID bigint, + HL_DB varchar(128) NOT NULL, + HL_TABLE varchar(128), + HL_PARTITION varchar(767), + HL_LOCK_STATE char(1) not null, + HL_LOCK_TYPE char(1) not null, + HL_LAST_HEARTBEAT bigint NOT NULL, + HL_ACQUIRED_AT bigint, + HL_USER varchar(128) NOT NULL, + HL_HOST varchar(128) NOT NULL, + HL_HEARTBEAT_COUNT int, + HL_AGENT_INFO varchar(128), + HL_BLOCKEDBY_EXT_ID bigint, + HL_BLOCKEDBY_INT_ID bigint, + PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID), + KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID); +CREATE TABLE NEXT_LOCK_ID ( + NL_NEXT bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE COMPACTION_QUEUE ( + CQ_ID bigint 
PRIMARY KEY, + CQ_DATABASE varchar(128) NOT NULL, + CQ_TABLE varchar(128) NOT NULL, + CQ_PARTITION varchar(767), + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_TBLPROPERTIES varchar(2048), + CQ_WORKER_ID varchar(128), + CQ_START bigint, + CQ_RUN_AS varchar(128), + CQ_HIGHEST_TXN_ID bigint, + CQ_META_INFO varbinary(2048), + CQ_HADOOP_JOB_ID varchar(32) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID bigint PRIMARY KEY, + CC_DATABASE varchar(128) NOT NULL, + CC_TABLE varchar(128) NOT NULL, + CC_PARTITION varchar(767), + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_TBLPROPERTIES varchar(2048), + CC_WORKER_ID varchar(128), + CC_START bigint, + CC_END bigint, + CC_RUN_AS varchar(128), + CC_HIGHEST_TXN_ID bigint, + CC_META_INFO varbinary(2048), + CC_HADOOP_JOB_ID varchar(32) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( + NCQ_NEXT bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + +CREATE TABLE AUX_TABLE ( + MT_KEY1 varchar(128) NOT NULL, + MT_KEY2 bigint NOT NULL, + MT_COMMENT varchar(255), + PRIMARY KEY(MT_KEY1, MT_KEY2) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE WRITE_SET ( + WS_DATABASE varchar(128) NOT NULL, + WS_TABLE varchar(128) NOT NULL, + WS_PARTITION varchar(767), + WS_TXNID bigint NOT NULL, + WS_COMMIT_ID bigint NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; -- ----------------------------------------------------------------- -- Record schema version. Should be the last step in the init script -- ----------------------------------------------------------------- diff --git standalone-metastore/src/main/sql/mysql/upgrade-1.2.0-to-2.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/upgrade-1.2.0-to-2.0.0.mysql.sql new file mode 100644 index 0000000000..1d85e2c6c0 --- /dev/null +++ standalone-metastore/src/main/sql/mysql/upgrade-1.2.0-to-2.0.0.mysql.sql @@ -0,0 +1,75 @@ +SELECT 'Upgrading MetaStore schema from 1.2.0 to 2.0.0' AS ' '; +--SOURCE 021-HIVE-7018.mysql.sql; +ALTER TABLE `TBLS` DROP FOREIGN KEY `TBLS_FK3`; +ALTER TABLE `TBLS` DROP KEY `TBLS_N51`; +ALTER TABLE `TBLS` DROP COLUMN `LINK_TARGET_ID`; + +ALTER TABLE `PARTITIONS` DROP FOREIGN KEY `PARTITIONS_FK3` ; +ALTER TABLE `PARTITIONS` DROP KEY `PARTITIONS_N51` ; +ALTER TABLE `PARTITIONS` DROP COLUMN `LINK_TARGET_ID` ; + +--SOURCE 022-HIVE-11970.mysql.sql; +ALTER TABLE `COLUMNS_V2` MODIFY `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL; +ALTER TABLE `PART_COL_PRIVS` MODIFY `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL; +ALTER TABLE `TBL_COL_PRIVS` MODIFY `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL; +ALTER TABLE `SORT_COLS` MODIFY `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL; +ALTER TABLE `TAB_COL_STATS` MODIFY `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL; +ALTER TABLE `PART_COL_STATS` MODIFY `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL; + +--SOURCE 023-HIVE-12807.mysql.sql; +ALTER TABLE `COMPACTION_QUEUE` ADD `CQ_HIGHEST_TXN_ID` bigint; + +--SOURCE 024-HIVE-12814.mysql.sql; +ALTER TABLE `COMPACTION_QUEUE` ADD `CQ_META_INFO` varbinary(2048); + +--SOURCE 025-HIVE-12816.mysql.sql; +ALTER TABLE `COMPACTION_QUEUE` ADD `CQ_HADOOP_JOB_ID` varchar(32); + +--SOURCE 026-HIVE-12818.mysql.sql; +CREATE TABLE COMPLETED_COMPACTIONS 
( + CC_ID bigint PRIMARY KEY, + CC_DATABASE varchar(128) NOT NULL, + CC_TABLE varchar(128) NOT NULL, + CC_PARTITION varchar(767), + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_WORKER_ID varchar(128), + CC_START bigint, + CC_END bigint, + CC_RUN_AS varchar(128), + CC_HIGHEST_TXN_ID bigint, + CC_META_INFO varbinary(2048), + CC_HADOOP_JOB_ID varchar(32) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + + + +--SOURCE 027-HIVE-12819.mysql.sql; +ALTER TABLE `TXNS` ADD `TXN_AGENT_INFO` varchar(128); + +--SOURCE 028-HIVE-12821.mysql.sql; +ALTER TABLE `TXNS` ADD `TXN_HEARTBEAT_COUNT` int; +ALTER TABLE `HIVE_LOCKS` ADD `HL_HEARTBEAT_COUNT` int; + +--SOURCE 029-HIVE-12822.mysql.sql; +ALTER TABLE `TXNS` ADD `TXN_META_INFO` varchar(128); + +--SOURCE 030-HIVE-12823.mysql.sql; +ALTER TABLE `HIVE_LOCKS` ADD `HL_AGENT_INFO` varchar(128); + +--SOURCE 031-HIVE-12831.mysql.sql; +ALTER TABLE `HIVE_LOCKS` ADD `HL_BLOCKEDBY_EXT_ID` bigint; +ALTER TABLE `HIVE_LOCKS` ADD `HL_BLOCKEDBY_INT_ID` bigint; + +--SOURCE 032-HIVE-12832.mysql.sql; +CREATE TABLE AUX_TABLE ( + MT_KEY1 varchar(128) NOT NULL, + MT_KEY2 bigint NOT NULL, + MT_COMMENT varchar(255), + PRIMARY KEY(MT_KEY1, MT_KEY2) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + + +UPDATE VERSION SET SCHEMA_VERSION='2.0.0', VERSION_COMMENT='Hive release version 2.0.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 1.2.0 to 2.0.0' AS ' '; + diff --git standalone-metastore/src/main/sql/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql standalone-metastore/src/main/sql/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql new file mode 100644 index 0000000000..456d4cd2ed --- /dev/null +++ standalone-metastore/src/main/sql/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql @@ -0,0 +1,42 @@ +SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0' AS ' '; + +--SOURCE 034-HIVE-13076.mysql.sql; +CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS` +( + `CHILD_CD_ID` BIGINT, + `CHILD_INTEGER_IDX` INT(11), + `CHILD_TBL_ID` BIGINT, + `PARENT_CD_ID` BIGINT NOT NULL, + `PARENT_INTEGER_IDX` INT(11) NOT NULL, + `PARENT_TBL_ID` BIGINT NOT NULL, + `POSITION` BIGINT NOT NULL, + `CONSTRAINT_NAME` VARCHAR(400) NOT NULL, + `CONSTRAINT_TYPE` SMALLINT(6) NOT NULL, + `UPDATE_RULE` SMALLINT(6), + `DELETE_RULE` SMALLINT(6), + `ENABLE_VALIDATE_RELY` SMALLINT(6) NOT NULL, + PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +CREATE INDEX `CONSTRAINTS_PARENT_TABLE_ID_INDEX` ON KEY_CONSTRAINTS (`PARENT_TBL_ID`) USING BTREE; + + + +--SOURCE 035-HIVE-13395.mysql.sql; +CREATE TABLE WRITE_SET ( + WS_DATABASE varchar(128) NOT NULL, + WS_TABLE varchar(128) NOT NULL, + WS_PARTITION varchar(767), + WS_TXNID bigint NOT NULL, + WS_COMMIT_ID bigint NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +ALTER TABLE TXN_COMPONENTS ADD TC_OPERATION_TYPE char(1); + +--SOURCE 036-HIVE-13354.mysql.sql; +ALTER TABLE COMPACTION_QUEUE ADD CQ_TBLPROPERTIES varchar(2048); +ALTER TABLE COMPLETED_COMPACTIONS ADD CC_TBLPROPERTIES varchar(2048); + +UPDATE VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0' AS ' '; + diff --git standalone-metastore/src/main/sql/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql standalone-metastore/src/main/sql/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql new file mode 100644 index 0000000000..b114587d1e --- /dev/null +++ standalone-metastore/src/main/sql/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql @@ -0,0 +1,43 @@ +SELECT 'Upgrading MetaStore schema from 
2.1.0 to 2.2.0' AS ' '; + +--SOURCE 037-HIVE-14496.mysql.sql; +-- Step 1: Add the column allowing null +ALTER TABLE `TBLS` ADD `IS_REWRITE_ENABLED` bit(1); + + -- Step 2: Replace the null with default value (false) +UPDATE `TBLS` SET `IS_REWRITE_ENABLED` = false; + +-- Step 3: Alter the column to disallow null values +ALTER TABLE `TBLS` MODIFY COLUMN `IS_REWRITE_ENABLED` bit(1) NOT NULL DEFAULT 0; + +--SOURCE 038-HIVE-10562.mysql.sql; +-- Step 1: Add the column for format +ALTER TABLE `NOTIFICATION_LOG` ADD `MESSAGE_FORMAT` varchar(16); +-- if MESSAGE_FORMAT is null, then it is the legacy hcat JSONMessageFactory that created this message + +-- Step 2 : Change the type of the MESSAGE field from mediumtext to longtext +ALTER TABLE `NOTIFICATION_LOG` MODIFY `MESSAGE` longtext; + +--SOURCE 039-HIVE-12274.mysql.sql; +ALTER TABLE COLUMNS_V2 MODIFY TYPE_NAME MEDIUMTEXT; +ALTER TABLE TABLE_PARAMS MODIFY PARAM_VALUE MEDIUMTEXT; +ALTER TABLE SERDE_PARAMS MODIFY PARAM_VALUE MEDIUMTEXT; +ALTER TABLE SD_PARAMS MODIFY PARAM_VALUE MEDIUMTEXT; + +ALTER TABLE TBLS MODIFY TBL_NAME varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL; +ALTER TABLE NOTIFICATION_LOG MODIFY TBL_NAME varchar(256) CHARACTER SET latin1 COLLATE latin1_bin; +ALTER TABLE PARTITION_EVENTS MODIFY TBL_NAME varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL; +ALTER TABLE TAB_COL_STATS MODIFY TABLE_NAME varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL; +ALTER TABLE PART_COL_STATS MODIFY TABLE_NAME varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL; +ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY CTC_TABLE varchar(256) CHARACTER SET latin1 COLLATE latin1_bin; + +ALTER TABLE COLUMNS_V2 MODIFY COLUMN_NAME varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL; +ALTER TABLE PART_COL_PRIVS MODIFY COLUMN_NAME varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL; +ALTER TABLE TBL_COL_PRIVS MODIFY COLUMN_NAME varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL; +ALTER TABLE SORT_COLS MODIFY COLUMN_NAME varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL; +ALTER TABLE TAB_COL_STATS MODIFY COLUMN_NAME varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL; +ALTER TABLE PART_COL_STATS MODIFY COLUMN_NAME varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL; + +UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS ' '; + diff --git standalone-metastore/src/main/sql/mysql/upgrade-2.2.0-to-2.3.0.mysql.sql standalone-metastore/src/main/sql/mysql/upgrade-2.2.0-to-2.3.0.mysql.sql new file mode 100644 index 0000000000..aa5110fb4d --- /dev/null +++ standalone-metastore/src/main/sql/mysql/upgrade-2.2.0-to-2.3.0.mysql.sql @@ -0,0 +1,8 @@ +SELECT 'Upgrading MetaStore schema from 2.2.0 to 2.3.0' AS ' '; + +--SOURCE 040-HIVE-16399.mysql.sql; +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); + +UPDATE VERSION SET SCHEMA_VERSION='2.3.0', VERSION_COMMENT='Hive release version 2.3.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 2.2.0 to 2.3.0' AS ' '; + diff --git standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql new file mode 100644 index 0000000000..0a170f6fc8 --- /dev/null +++ standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql @@ -0,0 +1,135 @@ +SELECT 'Upgrading MetaStore schema from 2.3.0 
to 3.0.0' AS ' '; + +--SOURCE 041-HIVE-16556.mysql.sql; +-- +-- Table structure for table METASTORE_DB_PROPERTIES +-- +CREATE TABLE IF NOT EXISTS `METASTORE_DB_PROPERTIES` ( + `PROPERTY_KEY` varchar(255) NOT NULL, + `PROPERTY_VALUE` varchar(1000) NOT NULL, + `DESCRIPTION` varchar(1000), + PRIMARY KEY(`PROPERTY_KEY`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +--SOURCE 042-HIVE-16575.mysql.sql; +CREATE INDEX `CONSTRAINTS_CONSTRAINT_TYPE_INDEX` ON KEY_CONSTRAINTS (`CONSTRAINT_TYPE`) USING BTREE; + +--SOURCE 043-HIVE-16922.mysql.sql; +UPDATE SERDE_PARAMS +SET PARAM_KEY='collection.delim' +WHERE PARAM_KEY='colelction.delim'; + +--SOURCE 044-HIVE-16997.mysql.sql; +ALTER TABLE PART_COL_STATS ADD COLUMN BIT_VECTOR BLOB; +ALTER TABLE TAB_COL_STATS ADD COLUMN BIT_VECTOR BLOB; + +--SOURCE 045-HIVE-16886.mysql.sql; +INSERT INTO `NOTIFICATION_SEQUENCE` (`NNI_ID`, `NEXT_EVENT_ID`) SELECT * from (select 1 as `NNI_ID`, 1 as `NOTIFICATION_SEQUENCE`) a WHERE (SELECT COUNT(*) FROM `NOTIFICATION_SEQUENCE`) = 0; + +--SOURCE 046-HIVE-17566.mysql.sql; +CREATE TABLE IF NOT EXISTS WM_RESOURCEPLAN ( + `RP_ID` bigint(20) NOT NULL, + `NAME` varchar(128) NOT NULL, + `QUERY_PARALLELISM` int(11), + `STATUS` varchar(20) NOT NULL, + `DEFAULT_POOL_ID` bigint(20), + PRIMARY KEY (`RP_ID`), + UNIQUE KEY `UNIQUE_WM_RESOURCEPLAN` (`NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE IF NOT EXISTS WM_POOL +( + `POOL_ID` bigint(20) NOT NULL, + `RP_ID` bigint(20) NOT NULL, + `PATH` varchar(767) NOT NULL, + `ALLOC_FRACTION` DOUBLE, + `QUERY_PARALLELISM` int(11), + `SCHEDULING_POLICY` varchar(767), + PRIMARY KEY (`POOL_ID`), + UNIQUE KEY `UNIQUE_WM_POOL` (`RP_ID`, `PATH`), + CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +ALTER TABLE `WM_RESOURCEPLAN` ADD CONSTRAINT `WM_RESOURCEPLAN_FK1` FOREIGN KEY (`DEFAULT_POOL_ID`) REFERENCES `WM_POOL`(`POOL_ID`); + +CREATE TABLE IF NOT EXISTS WM_TRIGGER +( + `TRIGGER_ID` bigint(20) NOT NULL, + `RP_ID` bigint(20) NOT NULL, + `NAME` varchar(128) NOT NULL, + `TRIGGER_EXPRESSION` varchar(1024), + `ACTION_EXPRESSION` varchar(1024), + `IS_IN_UNMANAGED` bit(1) NOT NULL DEFAULT 0, + PRIMARY KEY (`TRIGGER_ID`), + UNIQUE KEY `UNIQUE_WM_TRIGGER` (`RP_ID`, `NAME`), + CONSTRAINT `WM_TRIGGER_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE IF NOT EXISTS WM_POOL_TO_TRIGGER +( + `POOL_ID` bigint(20) NOT NULL, + `TRIGGER_ID` bigint(20) NOT NULL, + PRIMARY KEY (`POOL_ID`, `TRIGGER_ID`), + CONSTRAINT `WM_POOL_TO_TRIGGER_FK1` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`), + CONSTRAINT `WM_POOL_TO_TRIGGER_FK2` FOREIGN KEY (`TRIGGER_ID`) REFERENCES `WM_TRIGGER` (`TRIGGER_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE IF NOT EXISTS WM_MAPPING +( + `MAPPING_ID` bigint(20) NOT NULL, + `RP_ID` bigint(20) NOT NULL, + `ENTITY_TYPE` varchar(128) NOT NULL, + `ENTITY_NAME` varchar(128) NOT NULL, + `POOL_ID` bigint(20), + `ORDERING` int, + PRIMARY KEY (`MAPPING_ID`), + UNIQUE KEY `UNIQUE_WM_MAPPING` (`RP_ID`, `ENTITY_TYPE`, `ENTITY_NAME`), + CONSTRAINT `WM_MAPPING_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`), + CONSTRAINT `WM_MAPPING_FK2` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS ' '; + 
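+-- Illustration only, not part of the upgrade itself: a finished run can be
+-- sanity-checked against the single-row VERSION table, assuming the bookkeeping
+-- row is kept at VER_ID=1 as the UPDATE above maintains it:
+--
+--   SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1;
+--
+-- Only guarded statements, such as the NOTIFICATION_SEQUENCE seed above with its
+-- WHERE (SELECT COUNT(*) FROM NOTIFICATION_SEQUENCE) = 0 clause, are safe to
+-- re-run; the unguarded ALTER TABLE and CREATE INDEX statements here are not.
+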
+-- 048-HIVE-14498 +CREATE TABLE IF NOT EXISTS `MV_CREATION_METADATA` ( + `MV_CREATION_METADATA_ID` bigint(20) NOT NULL, + `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TXN_LIST` TEXT DEFAULT NULL, + PRIMARY KEY (`MV_CREATION_METADATA_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX MV_UNIQUE_TABLE ON MV_CREATION_METADATA (TBL_NAME, DB_NAME) USING BTREE; + +CREATE TABLE IF NOT EXISTS `MV_TABLES_USED` ( + `MV_CREATION_METADATA_ID` bigint(20) NOT NULL, + `TBL_ID` bigint(20) NOT NULL, + CONSTRAINT `MV_TABLES_USED_FK1` FOREIGN KEY (`MV_CREATION_METADATA_ID`) REFERENCES `MV_CREATION_METADATA` (`MV_CREATION_METADATA_ID`), + CONSTRAINT `MV_TABLES_USED_FK2` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +ALTER TABLE `COMPLETED_TXN_COMPONENTS` ADD `CTC_TIMESTAMP` timestamp; + +UPDATE `COMPLETED_TXN_COMPONENTS` SET `CTC_TIMESTAMP` = CURRENT_TIMESTAMP; + +ALTER TABLE `COMPLETED_TXN_COMPONENTS` MODIFY COLUMN `CTC_TIMESTAMP` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP; + +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE; + +-- 049-HIVE-18489.mysql.sql +UPDATE FUNC_RU + SET RESOURCE_URI = CONCAT('s3a', SUBSTR(RESOURCE_URI, 4, LENGTH(RESOURCE_URI))) + WHERE RESOURCE_URI LIKE 's3n://%' ; + +UPDATE SKEWED_COL_VALUE_LOC_MAP + SET LOCATION = CONCAT('s3a', SUBSTR(LOCATION, 4, LENGTH(LOCATION))) + WHERE LOCATION LIKE 's3n://%' ; + +UPDATE SDS + SET LOCATION = CONCAT('s3a', SUBSTR(LOCATION, 4, LENGTH(LOCATION))) + WHERE LOCATION LIKE 's3n://%' ; + +UPDATE DBS + SET DB_LOCATION_URI = CONCAT('s3a', SUBSTR(DB_LOCATION_URI, 4, LENGTH(DB_LOCATION_URI))) + WHERE DB_LOCATION_URI LIKE 's3n://%' ; diff --git standalone-metastore/src/main/sql/mysql/upgrade.order.mysql standalone-metastore/src/main/sql/mysql/upgrade.order.mysql new file mode 100644 index 0000000000..d7091b5228 --- /dev/null +++ standalone-metastore/src/main/sql/mysql/upgrade.order.mysql @@ -0,0 +1,16 @@ +0.5.0-to-0.6.0 +0.6.0-to-0.7.0 +0.7.0-to-0.8.0 +0.8.0-to-0.9.0 +0.9.0-to-0.10.0 +0.10.0-to-0.11.0 +0.11.0-to-0.12.0 +0.12.0-to-0.13.0 +0.13.0-to-0.14.0 +0.14.0-to-1.1.0 +1.1.0-to-1.2.0 +1.2.0-to-2.0.0 +2.0.0-to-2.1.0 +2.1.0-to-2.2.0 +2.2.0-to-2.3.0 +2.3.0-to-3.0.0 diff --git standalone-metastore/src/main/sql/oracle/create-user.oracle.sql standalone-metastore/src/main/sql/oracle/create-user.oracle.sql new file mode 100644 index 0000000000..41e8722f3e --- /dev/null +++ standalone-metastore/src/main/sql/oracle/create-user.oracle.sql @@ -0,0 +1,3 @@ +create user _REPLACE_WITH_USER_ identified by _REPLACE_WITH_PASSWD_; +grant connect to _REPLACE_WITH_USER_; +grant all privileges to _REPLACE_WITH_USER_; diff --git standalone-metastore/src/main/sql/oracle/hive-schema-1.2.0.oracle.sql standalone-metastore/src/main/sql/oracle/hive-schema-1.2.0.oracle.sql new file mode 100644 index 0000000000..f67c6b9962 --- /dev/null +++ standalone-metastore/src/main/sql/oracle/hive-schema-1.2.0.oracle.sql @@ -0,0 +1,856 @@ +-- Table SEQUENCE_TABLE is an internal table required by DataNucleus. +-- NOTE: Some versions of SchemaTool do not automatically generate this table. 
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416 +CREATE TABLE SEQUENCE_TABLE +( + SEQUENCE_NAME VARCHAR2(255) NOT NULL, + NEXT_VAL NUMBER NOT NULL +); + +ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME); + +-- Table NUCLEUS_TABLES is an internal table required by DataNucleus. +-- This table is required if datanucleus.autoStartMechanism=SchemaTable +-- NOTE: Some versions of SchemaTool do not automatically generate this table. +-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416 +CREATE TABLE NUCLEUS_TABLES +( + CLASS_NAME VARCHAR2(128) NOT NULL, + TABLE_NAME VARCHAR2(128) NOT NULL, + TYPE VARCHAR2(4) NOT NULL, + OWNER VARCHAR2(2) NOT NULL, + VERSION VARCHAR2(20) NOT NULL, + INTERFACE_NAME VARCHAR2(255) NULL +); + +ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME); + +-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege] +CREATE TABLE PART_COL_PRIVS +( + PART_COLUMN_GRANT_ID NUMBER NOT NULL, + "COLUMN_NAME" VARCHAR2(128) NULL, + CREATE_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PART_ID NUMBER NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + PART_COL_PRIV VARCHAR2(128) NULL +); + +ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID); + +-- Table CDS. +CREATE TABLE CDS +( + CD_ID NUMBER NOT NULL +); + +ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID); + +-- Table COLUMNS_V2 for join relationship +CREATE TABLE COLUMNS_V2 +( + CD_ID NUMBER NOT NULL, + "COMMENT" VARCHAR2(256) NULL, + "COLUMN_NAME" VARCHAR2(128) NOT NULL, + TYPE_NAME VARCHAR2(4000) NOT NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME"); + +-- Table PARTITION_KEY_VALS for join relationship +CREATE TABLE PARTITION_KEY_VALS +( + PART_ID NUMBER NOT NULL, + PART_KEY_VAL VARCHAR2(256) NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX); + +-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase] +CREATE TABLE DBS +( + DB_ID NUMBER NOT NULL, + "DESC" VARCHAR2(4000) NULL, + DB_LOCATION_URI VARCHAR2(4000) NOT NULL, + "NAME" VARCHAR2(128) NULL, + OWNER_NAME VARCHAR2(128) NULL, + OWNER_TYPE VARCHAR2(10) NULL +); + +ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID); + +-- Table PARTITION_PARAMS for join relationship +CREATE TABLE PARTITION_PARAMS +( + PART_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(256) NOT NULL, + PARAM_VALUE VARCHAR2(4000) NULL +); + +ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY); + +-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo] +CREATE TABLE SERDES +( + SERDE_ID NUMBER NOT NULL, + "NAME" VARCHAR2(128) NULL, + SLIB VARCHAR2(4000) NULL +); + +ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID); + +-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType] +CREATE TABLE TYPES +( + TYPES_ID NUMBER NOT NULL, + TYPE_NAME VARCHAR2(128) NULL, + TYPE1 VARCHAR2(767) NULL, + TYPE2 VARCHAR2(767) NULL +); + +ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID); + +-- Table PARTITION_KEYS for join relationship +CREATE TABLE PARTITION_KEYS +( + TBL_ID NUMBER NOT NULL, + PKEY_COMMENT 
VARCHAR2(4000) NULL, + PKEY_NAME VARCHAR2(128) NOT NULL, + PKEY_TYPE VARCHAR2(767) NOT NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME); + +-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole] +CREATE TABLE ROLES +( + ROLE_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + OWNER_NAME VARCHAR2(128) NULL, + ROLE_NAME VARCHAR2(128) NULL +); + +ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID); + +-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition] +CREATE TABLE PARTITIONS +( + PART_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + LAST_ACCESS_TIME NUMBER (10) NOT NULL, + PART_NAME VARCHAR2(767) NULL, + SD_ID NUMBER NULL, + TBL_ID NUMBER NULL +); + +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID); + +-- Table INDEX_PARAMS for join relationship +CREATE TABLE INDEX_PARAMS +( + INDEX_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(256) NOT NULL, + PARAM_VALUE VARCHAR2(4000) NULL +); + +ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY); + +-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege] +CREATE TABLE TBL_COL_PRIVS +( + TBL_COLUMN_GRANT_ID NUMBER NOT NULL, + "COLUMN_NAME" VARCHAR2(128) NULL, + CREATE_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + TBL_COL_PRIV VARCHAR2(128) NULL, + TBL_ID NUMBER NULL +); + +ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID); + +-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex] +CREATE TABLE IDXS +( + INDEX_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)), + INDEX_HANDLER_CLASS VARCHAR2(4000) NULL, + INDEX_NAME VARCHAR2(128) NULL, + INDEX_TBL_ID NUMBER NULL, + LAST_ACCESS_TIME NUMBER (10) NOT NULL, + ORIG_TBL_ID NUMBER NULL, + SD_ID NUMBER NULL +); + +ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID); + +-- Table BUCKETING_COLS for join relationship +CREATE TABLE BUCKETING_COLS +( + SD_ID NUMBER NOT NULL, + BUCKET_COL_NAME VARCHAR2(256) NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +-- Table TYPE_FIELDS for join relationship +CREATE TABLE TYPE_FIELDS +( + TYPE_NAME NUMBER NOT NULL, + "COMMENT" VARCHAR2(256) NULL, + FIELD_NAME VARCHAR2(128) NOT NULL, + FIELD_TYPE VARCHAR2(767) NOT NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME); + +-- Table SD_PARAMS for join relationship +CREATE TABLE SD_PARAMS +( + SD_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(256) NOT NULL, + PARAM_VALUE VARCHAR2(4000) NULL +); + +ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY); + +-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege] +CREATE TABLE GLOBAL_PRIVS +( + USER_GRANT_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + USER_PRIV VARCHAR2(128) NULL +); + +ALTER TABLE GLOBAL_PRIVS ADD 
CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID); + +-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor] +CREATE TABLE SDS +( + SD_ID NUMBER NOT NULL, + CD_ID NUMBER NULL, + INPUT_FORMAT VARCHAR2(4000) NULL, + IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)), + LOCATION VARCHAR2(4000) NULL, + NUM_BUCKETS NUMBER (10) NOT NULL, + OUTPUT_FORMAT VARCHAR2(4000) NULL, + SERDE_ID NUMBER NULL, + IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0)) +); + +ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID); + +-- Table TABLE_PARAMS for join relationship +CREATE TABLE TABLE_PARAMS +( + TBL_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(256) NOT NULL, + PARAM_VALUE VARCHAR2(4000) NULL +); + +ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY); + +-- Table SORT_COLS for join relationship +CREATE TABLE SORT_COLS +( + SD_ID NUMBER NOT NULL, + "COLUMN_NAME" VARCHAR2(128) NULL, + "ORDER" NUMBER (10) NOT NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege] +CREATE TABLE TBL_PRIVS +( + TBL_GRANT_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + TBL_PRIV VARCHAR2(128) NULL, + TBL_ID NUMBER NULL +); + +ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID); + +-- Table DATABASE_PARAMS for join relationship +CREATE TABLE DATABASE_PARAMS +( + DB_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(180) NOT NULL, + PARAM_VALUE VARCHAR2(4000) NULL +); + +ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY); + +-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap] +CREATE TABLE ROLE_MAP +( + ROLE_GRANT_ID NUMBER NOT NULL, + ADD_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + ROLE_ID NUMBER NULL +); + +ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID); + +-- Table SERDE_PARAMS for join relationship +CREATE TABLE SERDE_PARAMS +( + SERDE_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(256) NOT NULL, + PARAM_VALUE VARCHAR2(4000) NULL +); + +ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY); + +-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege] +CREATE TABLE PART_PRIVS +( + PART_GRANT_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PART_ID NUMBER NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + PART_PRIV VARCHAR2(128) NULL +); + +ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID); + +-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege] +CREATE TABLE DB_PRIVS +( + DB_GRANT_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + DB_ID NUMBER NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + DB_PRIV 
VARCHAR2(128) NULL +); + +ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID); + +-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable] +CREATE TABLE TBLS +( + TBL_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + DB_ID NUMBER NULL, + LAST_ACCESS_TIME NUMBER (10) NOT NULL, + OWNER VARCHAR2(767) NULL, + RETENTION NUMBER (10) NOT NULL, + SD_ID NUMBER NULL, + TBL_NAME VARCHAR2(128) NULL, + TBL_TYPE VARCHAR2(128) NULL, + VIEW_EXPANDED_TEXT CLOB NULL, + VIEW_ORIGINAL_TEXT CLOB NULL +); + +ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); + +-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent] +CREATE TABLE PARTITION_EVENTS +( + PART_NAME_ID NUMBER NOT NULL, + DB_NAME VARCHAR2(128) NULL, + EVENT_TIME NUMBER NOT NULL, + EVENT_TYPE NUMBER (10) NOT NULL, + PARTITION_NAME VARCHAR2(767) NULL, + TBL_NAME VARCHAR2(128) NULL +); + +ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID); + +-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList] +CREATE TABLE SKEWED_STRING_LIST +( + STRING_LIST_ID NUMBER NOT NULL +); + +ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID); + +CREATE TABLE SKEWED_STRING_LIST_VALUES +( + STRING_LIST_ID NUMBER NOT NULL, + "STRING_LIST_VALUE" VARCHAR2(256) NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX); + +ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ; + +CREATE TABLE SKEWED_COL_NAMES +( + SD_ID NUMBER NOT NULL, + "SKEWED_COL_NAME" VARCHAR2(256) NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE TABLE SKEWED_COL_VALUE_LOC_MAP +( + SD_ID NUMBER NOT NULL, + STRING_LIST_ID_KID NUMBER NOT NULL, + "LOCATION" VARCHAR2(4000) NULL +); + +CREATE TABLE MASTER_KEYS +( + KEY_ID NUMBER (10) NOT NULL, + MASTER_KEY VARCHAR2(767) NULL +); + +CREATE TABLE DELEGATION_TOKENS +( + TOKEN_IDENT VARCHAR2(767) NOT NULL, + TOKEN VARCHAR2(767) NULL +); + +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID); + +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ; + +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE TABLE SKEWED_VALUES +( + SD_ID_OID NUMBER NOT NULL, + STRING_LIST_ID_EID NUMBER NOT NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX); + +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ; + +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +-- column statistics + +CREATE TABLE TAB_COL_STATS ( + CS_ID NUMBER NOT NULL, + 
DB_NAME VARCHAR2(128) NOT NULL, + TABLE_NAME VARCHAR2(128) NOT NULL, + COLUMN_NAME VARCHAR2(128) NOT NULL, + COLUMN_TYPE VARCHAR2(128) NOT NULL, + TBL_ID NUMBER NOT NULL, + LONG_LOW_VALUE NUMBER, + LONG_HIGH_VALUE NUMBER, + DOUBLE_LOW_VALUE NUMBER, + DOUBLE_HIGH_VALUE NUMBER, + BIG_DECIMAL_LOW_VALUE VARCHAR2(4000), + BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000), + NUM_NULLS NUMBER NOT NULL, + NUM_DISTINCTS NUMBER, + AVG_COL_LEN NUMBER, + MAX_COL_LEN NUMBER, + NUM_TRUES NUMBER, + NUM_FALSES NUMBER, + LAST_ANALYZED NUMBER NOT NULL +); + +CREATE TABLE VERSION ( + VER_ID NUMBER NOT NULL, + SCHEMA_VERSION VARCHAR(127) NOT NULL, + VERSION_COMMENT VARCHAR(255) +); +ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID); + +ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID); + +ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID); + +CREATE TABLE PART_COL_STATS ( + CS_ID NUMBER NOT NULL, + DB_NAME VARCHAR2(128) NOT NULL, + TABLE_NAME VARCHAR2(128) NOT NULL, + PARTITION_NAME VARCHAR2(767) NOT NULL, + COLUMN_NAME VARCHAR2(128) NOT NULL, + COLUMN_TYPE VARCHAR2(128) NOT NULL, + PART_ID NUMBER NOT NULL, + LONG_LOW_VALUE NUMBER, + LONG_HIGH_VALUE NUMBER, + DOUBLE_LOW_VALUE NUMBER, + DOUBLE_HIGH_VALUE NUMBER, + BIG_DECIMAL_LOW_VALUE VARCHAR2(4000), + BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000), + NUM_NULLS NUMBER NOT NULL, + NUM_DISTINCTS NUMBER, + AVG_COL_LEN NUMBER, + MAX_COL_LEN NUMBER, + NUM_TRUES NUMBER, + NUM_FALSES NUMBER, + LAST_ANALYZED NUMBER NOT NULL +); + +ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID); + +ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED; + +CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID); + +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); + +CREATE TABLE FUNCS ( + FUNC_ID NUMBER NOT NULL, + CLASS_NAME VARCHAR2(4000), + CREATE_TIME NUMBER(10) NOT NULL, + DB_ID NUMBER, + FUNC_NAME VARCHAR2(128), + FUNC_TYPE NUMBER(10) NOT NULL, + OWNER_NAME VARCHAR2(128), + OWNER_TYPE VARCHAR2(10) +); + +ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID); + +CREATE TABLE FUNC_RU ( + FUNC_ID NUMBER NOT NULL, + RESOURCE_TYPE NUMBER(10) NOT NULL, + RESOURCE_URI VARCHAR2(4000), + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX); + +CREATE TABLE NOTIFICATION_LOG +( + NL_ID NUMBER NOT NULL, + EVENT_ID NUMBER NOT NULL, + EVENT_TIME NUMBER(10) NOT NULL, + EVENT_TYPE VARCHAR2(32) NOT NULL, + DB_NAME VARCHAR2(128), + TBL_NAME VARCHAR2(128), + MESSAGE CLOB NULL +); + +ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID); + +CREATE TABLE NOTIFICATION_SEQUENCE +( + NNI_ID NUMBER NOT NULL, + NEXT_EVENT_ID NUMBER NOT NULL +); + +ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID); + + + +-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege] +ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ; + +CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID); + +CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS 
(PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table COLUMNS_V2 +ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ; + +CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID); + + +-- Constraints for table PARTITION_KEY_VALS +ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ; + +CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID); + + +-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase] +CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME"); + + +-- Constraints for table PARTITION_PARAMS +ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ; + +CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID); + + +-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo] + +-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType] +CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME); + + +-- Constraints for table PARTITION_KEYS +ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID); + + +-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole] +CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME); + + +-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition] +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID); + +CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID); + +CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID); + + +-- Constraints for table INDEX_PARAMS +ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ; + +CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID); + + +-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege] +ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID); + + +-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex] +ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID); + +CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID); + +CREATE INDEX IDXS_N51 ON IDXS (SD_ID); + +CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID); + + +-- Constraints for table BUCKETING_COLS +ALTER TABLE 
BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID); + + +-- Constraints for table TYPE_FIELDS +ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ; + +CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME); + + +-- Constraints for table SD_PARAMS +ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID); + + +-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege] +CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor] +ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ; +ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ; + +CREATE INDEX SDS_N49 ON SDS (SERDE_ID); +CREATE INDEX SDS_N50 ON SDS (CD_ID); + + +-- Constraints for table TABLE_PARAMS +ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID); + + +-- Constraints for table SORT_COLS +ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID); + + +-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege] +ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID); + +CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table DATABASE_PARAMS +ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ; + +CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID); + + +-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap] +ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ; + +CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID); + +CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table SERDE_PARAMS +ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ; + +CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID); + + +-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege] +ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ; + +CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID); + + +-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege] +ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS 
(DB_ID) INITIALLY DEFERRED ; + +CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID); + + +-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable] +ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ; + +ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE INDEX TBLS_N49 ON TBLS (DB_ID); + +CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID); + +CREATE INDEX TBLS_N50 ON TBLS (SD_ID); + + +-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent] +CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME); + + +-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions] +ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED; + +CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID); + +CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID); + + +-- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions] +ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED; + +CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID); + +------------------------------ +-- Transaction and lock tables +------------------------------ +CREATE TABLE TXNS ( + TXN_ID NUMBER(19) PRIMARY KEY, + TXN_STATE char(1) NOT NULL, + TXN_STARTED NUMBER(19) NOT NULL, + TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL, + TXN_USER varchar(128) NOT NULL, + TXN_HOST varchar(128) NOT NULL +) ROWDEPENDENCIES; + +CREATE TABLE TXN_COMPONENTS ( + TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID), + TC_DATABASE VARCHAR2(128) NOT NULL, + TC_TABLE VARCHAR2(128), + TC_PARTITION VARCHAR2(767) NULL +) ROWDEPENDENCIES; + +CREATE TABLE COMPLETED_TXN_COMPONENTS ( + CTC_TXNID NUMBER(19), + CTC_DATABASE varchar(128) NOT NULL, + CTC_TABLE varchar(128), + CTC_PARTITION varchar(767) +) ROWDEPENDENCIES; + +CREATE TABLE NEXT_TXN_ID ( + NTXN_NEXT NUMBER(19) NOT NULL +); +INSERT INTO NEXT_TXN_ID VALUES(1); + +CREATE TABLE HIVE_LOCKS ( + HL_LOCK_EXT_ID NUMBER(19) NOT NULL, + HL_LOCK_INT_ID NUMBER(19) NOT NULL, + HL_TXNID NUMBER(19), + HL_DB VARCHAR2(128) NOT NULL, + HL_TABLE VARCHAR2(128), + HL_PARTITION VARCHAR2(767), + HL_LOCK_STATE CHAR(1) NOT NULL, + HL_LOCK_TYPE CHAR(1) NOT NULL, + HL_LAST_HEARTBEAT NUMBER(19) NOT NULL, + HL_ACQUIRED_AT NUMBER(19), + HL_USER varchar(128) NOT NULL, + HL_HOST varchar(128) NOT NULL, + PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID) +) ROWDEPENDENCIES; + +CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID); + +CREATE TABLE NEXT_LOCK_ID ( + NL_NEXT NUMBER(19) NOT NULL +); +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE COMPACTION_QUEUE ( + CQ_ID NUMBER(19) PRIMARY KEY, + CQ_DATABASE varchar(128) NOT NULL, + CQ_TABLE varchar(128) NOT NULL, + CQ_PARTITION varchar(767), + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_WORKER_ID varchar(128), + CQ_START NUMBER(19), + CQ_RUN_AS varchar(128) +) ROWDEPENDENCIES; + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( + NCQ_NEXT NUMBER(19) NOT NULL +); +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + + + +-- ----------------------------------------------------------------- +-- Record schema version. 
Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '1.2.0', 'Hive release version 1.2.0'); diff --git metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql similarity index 88% rename from metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql rename to standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql index 481d413b38..37f9063993 100644 --- metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql +++ standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql @@ -392,6 +392,8 @@ CREATE TABLE MV_CREATION_METADATA ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID); +CREATE UNIQUE INDEX UNIQUE_TABLE ON MV_CREATION_METADATA ("DB_NAME", "TBL_NAME"); + -- Table MV_TABLES_USED for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata] CREATE TABLE MV_TABLES_USED ( @@ -598,34 +600,34 @@ ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMAR CREATE TABLE WM_RESOURCEPLAN ( - RP_ID bigint NOT NULL, - "NAME" nvarchar(128) NOT NULL, - QUERY_PARALLELISM int, - STATUS nvarchar(20) NOT NULL, - DEFAULT_POOL_ID bigint + RP_ID NUMBER NOT NULL, + "NAME" VARCHAR2(128) NOT NULL, + QUERY_PARALLELISM NUMBER(10), + STATUS VARCHAR2(20) NOT NULL, + DEFAULT_POOL_ID NUMBER ); ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID); CREATE TABLE WM_POOL ( - POOL_ID bigint NOT NULL, - RP_ID bigint NOT NULL, - PATH nvarchar(1024) NOT NULL, - ALLOC_FRACTION DOUBLE, - QUERY_PARALLELISM int, - SCHEDULING_POLICY nvarchar(1024) + POOL_ID NUMBER NOT NULL, + RP_ID NUMBER NOT NULL, + PATH VARCHAR2(1024) NOT NULL, + ALLOC_FRACTION NUMBER, + QUERY_PARALLELISM NUMBER(10), + SCHEDULING_POLICY VARCHAR2(1024) ); ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID); CREATE TABLE WM_TRIGGER ( - TRIGGER_ID bigint NOT NULL, - RP_ID bigint NOT NULL, - "NAME" nvarchar(128) NOT NULL, - TRIGGER_EXPRESSION nvarchar(1024), - ACTION_EXPRESSION nvarchar(1024), + TRIGGER_ID NUMBER NOT NULL, + RP_ID NUMBER NOT NULL, + "NAME" VARCHAR2(128) NOT NULL, + TRIGGER_EXPRESSION VARCHAR2(1024), + ACTION_EXPRESSION VARCHAR2(1024), IS_IN_UNMANAGED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_IN_UNMANAGED IN (1,0)) ); @@ -633,20 +635,20 @@ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID); CREATE TABLE WM_POOL_TO_TRIGGER ( - POOL_ID bigint NOT NULL, - TRIGGER_ID bigint NOT NULL + POOL_ID NUMBER NOT NULL, + TRIGGER_ID NUMBER NOT NULL ); ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID); CREATE TABLE WM_MAPPING ( - MAPPING_ID bigint NOT NULL, - RP_ID bigint NOT NULL, - ENTITY_TYPE nvarchar(128) NOT NULL, - ENTITY_NAME nvarchar(128) NOT NULL, - POOL_ID bigint, - ORDERING int + MAPPING_ID NUMBER NOT NULL, + RP_ID NUMBER NOT NULL, + ENTITY_TYPE VARCHAR2(128) NOT NULL, + ENTITY_NAME VARCHAR2(128) NOT NULL, + POOL_ID NUMBER NOT NULL, + ORDERING NUMBER(10) ); ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID); @@ -917,7 +919,123 @@ ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK2 FOREIGN KEY (TBL_ID ------------------------------ -- Transaction and lock tables ------------------------------ -@hive-txn-schema-3.0.0.oracle.sql; +CREATE TABLE TXNS ( + TXN_ID NUMBER(19)
PRIMARY KEY, + TXN_STATE char(1) NOT NULL, + TXN_STARTED NUMBER(19) NOT NULL, + TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL, + TXN_USER varchar(128) NOT NULL, + TXN_HOST varchar(128) NOT NULL, + TXN_AGENT_INFO varchar2(128), + TXN_META_INFO varchar2(128), + TXN_HEARTBEAT_COUNT number(10) +) ROWDEPENDENCIES; + +CREATE TABLE TXN_COMPONENTS ( + TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID), + TC_DATABASE VARCHAR2(128) NOT NULL, + TC_TABLE VARCHAR2(256), + TC_PARTITION VARCHAR2(767) NULL, + TC_OPERATION_TYPE char(1) NOT NULL +) ROWDEPENDENCIES; + +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); + +CREATE TABLE COMPLETED_TXN_COMPONENTS ( + CTC_TXNID NUMBER(19), + CTC_DATABASE VARCHAR2(128) NOT NULL, + CTC_TABLE VARCHAR2(128), + CTC_PARTITION VARCHAR2(767), + CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL +) ROWDEPENDENCIES; + +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +CREATE TABLE NEXT_TXN_ID ( + NTXN_NEXT NUMBER(19) NOT NULL +); +INSERT INTO NEXT_TXN_ID VALUES(1); + +CREATE TABLE HIVE_LOCKS ( + HL_LOCK_EXT_ID NUMBER(19) NOT NULL, + HL_LOCK_INT_ID NUMBER(19) NOT NULL, + HL_TXNID NUMBER(19), + HL_DB VARCHAR2(128) NOT NULL, + HL_TABLE VARCHAR2(128), + HL_PARTITION VARCHAR2(767), + HL_LOCK_STATE CHAR(1) NOT NULL, + HL_LOCK_TYPE CHAR(1) NOT NULL, + HL_LAST_HEARTBEAT NUMBER(19) NOT NULL, + HL_ACQUIRED_AT NUMBER(19), + HL_USER varchar(128) NOT NULL, + HL_HOST varchar(128) NOT NULL, + HL_HEARTBEAT_COUNT number(10), + HL_AGENT_INFO varchar2(128), + HL_BLOCKEDBY_EXT_ID number(19), + HL_BLOCKEDBY_INT_ID number(19), + PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID) +) ROWDEPENDENCIES; + +CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID); + +CREATE TABLE NEXT_LOCK_ID ( + NL_NEXT NUMBER(19) NOT NULL +); +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE COMPACTION_QUEUE ( + CQ_ID NUMBER(19) PRIMARY KEY, + CQ_DATABASE varchar(128) NOT NULL, + CQ_TABLE varchar(128) NOT NULL, + CQ_PARTITION varchar(767), + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_TBLPROPERTIES varchar(2048), + CQ_WORKER_ID varchar(128), + CQ_START NUMBER(19), + CQ_RUN_AS varchar(128), + CQ_HIGHEST_TXN_ID NUMBER(19), + CQ_META_INFO BLOB, + CQ_HADOOP_JOB_ID varchar2(32) +) ROWDEPENDENCIES; + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( + NCQ_NEXT NUMBER(19) NOT NULL +); +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID NUMBER(19) PRIMARY KEY, + CC_DATABASE varchar(128) NOT NULL, + CC_TABLE varchar(128) NOT NULL, + CC_PARTITION varchar(767), + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_TBLPROPERTIES varchar(2048), + CC_WORKER_ID varchar(128), + CC_START NUMBER(19), + CC_END NUMBER(19), + CC_RUN_AS varchar(128), + CC_HIGHEST_TXN_ID NUMBER(19), + CC_META_INFO BLOB, + CC_HADOOP_JOB_ID varchar2(32) +) ROWDEPENDENCIES; + +CREATE TABLE AUX_TABLE ( + MT_KEY1 varchar2(128) NOT NULL, + MT_KEY2 number(19) NOT NULL, + MT_COMMENT varchar2(255), + PRIMARY KEY(MT_KEY1, MT_KEY2) +); + +CREATE TABLE WRITE_SET ( + WS_DATABASE varchar2(128) NOT NULL, + WS_TABLE varchar2(128) NOT NULL, + WS_PARTITION varchar2(767), + WS_TXNID number(19) NOT NULL, + WS_COMMIT_ID number(19) NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +); -- ----------------------------------------------------------------- -- Record schema version. 
Should be the last step in the init script diff --git standalone-metastore/src/main/sql/oracle/upgrade-1.2.0-to-2.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/upgrade-1.2.0-to-2.0.0.oracle.sql new file mode 100644 index 0000000000..7331c8b4db --- /dev/null +++ standalone-metastore/src/main/sql/oracle/upgrade-1.2.0-to-2.0.0.oracle.sql @@ -0,0 +1,83 @@ +SELECT 'Upgrading MetaStore schema from 1.2.0 to 2.0.0' AS Status from dual; + +--@022-HIVE-11970.oracle.sql; +ALTER TABLE COLUMNS_V2 MODIFY ( + "COLUMN_NAME" VARCHAR2(1000) +); + +ALTER TABLE PART_COL_PRIVS MODIFY ( + "COLUMN_NAME" VARCHAR2(1000) +); + +ALTER TABLE TBL_COL_PRIVS MODIFY ( + "COLUMN_NAME" VARCHAR2(1000) +); + +ALTER TABLE SORT_COLS MODIFY ( + "COLUMN_NAME" VARCHAR2(1000) +); + +ALTER TABLE TAB_COL_STATS MODIFY ( + "COLUMN_NAME" VARCHAR2(1000) +); + +ALTER TABLE PART_COL_STATS MODIFY ( + "COLUMN_NAME" VARCHAR2(1000) +); + +--@023-HIVE-12807.oracle.sql; +ALTER TABLE COMPACTION_QUEUE ADD CQ_HIGHEST_TXN_ID NUMBER(19); + +--@024-HIVE-12814.oracle.sql; +ALTER TABLE COMPACTION_QUEUE ADD CQ_META_INFO BLOB; + +--@025-HIVE-12816.oracle.sql; +ALTER TABLE COMPACTION_QUEUE ADD CQ_HADOOP_JOB_ID varchar2(32); + +--@026-HIVE-12818.oracle.sql; +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID NUMBER(19) PRIMARY KEY, + CC_DATABASE varchar(128) NOT NULL, + CC_TABLE varchar(128) NOT NULL, + CC_PARTITION varchar(767), + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_WORKER_ID varchar(128), + CC_START NUMBER(19), + CC_END NUMBER(19), + CC_RUN_AS varchar(128), + CC_HIGHEST_TXN_ID NUMBER(19), + CC_META_INFO BLOB, + CC_HADOOP_JOB_ID varchar2(32) +) ROWDEPENDENCIES; + + + +--@027-HIVE-12819.oracle.sql; +ALTER TABLE TXNS ADD TXN_AGENT_INFO varchar2(128); + +--@028-HIVE-12821.oracle.sql; +ALTER TABLE TXNS ADD TXN_HEARTBEAT_COUNT number(10); +ALTER TABLE HIVE_LOCKS ADD HL_HEARTBEAT_COUNT number(10); + +--@029-HIVE-12822.oracle.sql; +ALTER TABLE TXNS ADD TXN_META_INFO varchar2(128); + +--@030-HIVE-12823.oracle.sql; +ALTER TABLE HIVE_LOCKS ADD HL_AGENT_INFO varchar2(128); + +--@031-HIVE-12381.oracle.sql; +ALTER TABLE HIVE_LOCKS ADD HL_BLOCKEDBY_EXT_ID number(19); +ALTER TABLE HIVE_LOCKS ADD HL_BLOCKEDBY_INT_ID number(19); + +--@032-HIVE-12832.oracle.sql; +CREATE TABLE AUX_TABLE ( + MT_KEY1 varchar2(128) NOT NULL, + MT_KEY2 number(19) NOT NULL, + MT_COMMENT varchar2(255), + PRIMARY KEY(MT_KEY1, MT_KEY2) +); + + +UPDATE VERSION SET SCHEMA_VERSION='2.0.0', VERSION_COMMENT='Hive release version 2.0.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 1.2.0 to 2.0.0' AS Status from dual; diff --git standalone-metastore/src/main/sql/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql standalone-metastore/src/main/sql/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql new file mode 100644 index 0000000000..f3a61f2518 --- /dev/null +++ standalone-metastore/src/main/sql/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql @@ -0,0 +1,39 @@ +SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0' AS Status from dual; + +--@034-HIVE-13076.oracle.sql; +CREATE TABLE KEY_CONSTRAINTS +( + CHILD_CD_ID NUMBER, + CHILD_INTEGER_IDX NUMBER, + CHILD_TBL_ID NUMBER, + PARENT_CD_ID NUMBER NOT NULL, + PARENT_INTEGER_IDX NUMBER NOT NULL, + PARENT_TBL_ID NUMBER NOT NULL, + POSITION NUMBER NOT NULL, + CONSTRAINT_NAME VARCHAR(400) NOT NULL, + CONSTRAINT_TYPE NUMBER NOT NULL, + UPDATE_RULE NUMBER, + DELETE_RULE NUMBER, + ENABLE_VALIDATE_RELY NUMBER NOT NULL +) ; +ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION); +CREATE INDEX 
CONSTRAINTS_PT_INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID); + +--@035-HIVE-13395.oracle.sql; +CREATE TABLE WRITE_SET ( + WS_DATABASE varchar2(128) NOT NULL, + WS_TABLE varchar2(128) NOT NULL, + WS_PARTITION varchar2(767), + WS_TXNID number(19) NOT NULL, + WS_COMMIT_ID number(19) NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +); + +ALTER TABLE TXN_COMPONENTS ADD TC_OPERATION_TYPE char(1); + +--@036-HIVE-13354.oracle.sql; +ALTER TABLE COMPACTION_QUEUE ADD CQ_TBLPROPERTIES varchar(2048); +ALTER TABLE COMPLETED_COMPACTIONS ADD CC_TBLPROPERTIES varchar(2048); + +UPDATE VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0' AS Status from dual; diff --git standalone-metastore/src/main/sql/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql standalone-metastore/src/main/sql/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql new file mode 100644 index 0000000000..482ef17202 --- /dev/null +++ standalone-metastore/src/main/sql/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql @@ -0,0 +1,58 @@ +SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS Status from dual; + +--@037-HIVE-14496.oracle.sql; +-- Step 1: Add the column allowing null +ALTER TABLE TBLS ADD IS_REWRITE_ENABLED NUMBER(1) NULL; + -- Step 2: Replace the null with default value (false) +UPDATE TBLS SET IS_REWRITE_ENABLED = 0; +-- Step 3: Alter the column to disallow null values +ALTER TABLE TBLS MODIFY(IS_REWRITE_ENABLED DEFAULT 0); +ALTER TABLE TBLS MODIFY(IS_REWRITE_ENABLED NOT NULL); +ALTER TABLE TBLS ADD CONSTRAINT REWRITE_CHECK CHECK (IS_REWRITE_ENABLED IN (1,0)); + +--@038-HIVE-10562.oracle.sql; +ALTER TABLE NOTIFICATION_LOG ADD MESSAGE_FORMAT VARCHAR(16) NULL; + + +--@039-HIVE-12274.oracle.sql; +-- change TYPE_NAME and PARAM_VALUE columns to CLOBs +ALTER TABLE COLUMNS_V2 ADD (TEMP CLOB); +UPDATE COLUMNS_V2 SET TEMP=TYPE_NAME; +ALTER TABLE COLUMNS_V2 DROP COLUMN TYPE_NAME; +ALTER TABLE COLUMNS_V2 RENAME COLUMN TEMP TO TYPE_NAME; + +ALTER TABLE TABLE_PARAMS ADD (TEMP CLOB); +UPDATE TABLE_PARAMS SET TEMP=PARAM_VALUE, PARAM_VALUE=NULL; +ALTER TABLE TABLE_PARAMS DROP COLUMN PARAM_VALUE; +ALTER TABLE TABLE_PARAMS RENAME COLUMN TEMP TO PARAM_VALUE; + +ALTER TABLE SERDE_PARAMS ADD (TEMP CLOB); +UPDATE SERDE_PARAMS SET TEMP=PARAM_VALUE, PARAM_VALUE=NULL; +ALTER TABLE SERDE_PARAMS DROP COLUMN PARAM_VALUE; +ALTER TABLE SERDE_PARAMS RENAME COLUMN TEMP TO PARAM_VALUE; + +ALTER TABLE SD_PARAMS ADD (TEMP CLOB); +UPDATE SD_PARAMS SET TEMP=PARAM_VALUE, PARAM_VALUE=NULL; +ALTER TABLE SD_PARAMS DROP COLUMN PARAM_VALUE; +ALTER TABLE SD_PARAMS RENAME COLUMN TEMP TO PARAM_VALUE; + +-- Expand the hive table name length to 256 +ALTER TABLE TBLS MODIFY (TBL_NAME VARCHAR2(256)); +ALTER TABLE NOTIFICATION_LOG MODIFY (TBL_NAME VARCHAR2(256)); +ALTER TABLE PARTITION_EVENTS MODIFY (TBL_NAME VARCHAR2(256)); +ALTER TABLE TAB_COL_STATS MODIFY (TABLE_NAME VARCHAR2(256)); +ALTER TABLE PART_COL_STATS MODIFY (TABLE_NAME VARCHAR2(256)); +ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY (CTC_TABLE VARCHAR2(256)); + +-- Expand the hive column name length to 767 +ALTER TABLE COLUMNS_V2 MODIFY (COLUMN_NAME VARCHAR(767)); +ALTER TABLE PART_COL_PRIVS MODIFY (COLUMN_NAME VARCHAR2(767)); +ALTER TABLE TBL_COL_PRIVS MODIFY (COLUMN_NAME VARCHAR2(767)); +ALTER TABLE SORT_COLS MODIFY (COLUMN_NAME VARCHAR2(767)); +ALTER TABLE TAB_COL_STATS MODIFY (COLUMN_NAME VARCHAR2(767)); +ALTER TABLE PART_COL_STATS MODIFY (COLUMN_NAME VARCHAR2(767)); + +UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version
2.2.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS Status from dual; diff --git standalone-metastore/src/main/sql/oracle/upgrade-2.2.0-to-2.3.0.oracle.sql standalone-metastore/src/main/sql/oracle/upgrade-2.2.0-to-2.3.0.oracle.sql new file mode 100644 index 0000000000..5130431f66 --- /dev/null +++ standalone-metastore/src/main/sql/oracle/upgrade-2.2.0-to-2.3.0.oracle.sql @@ -0,0 +1,7 @@ +SELECT 'Upgrading MetaStore schema from 2.2.0 to 2.3.0' AS Status from dual; + +--@040-HIVE-16399.oracle.sql; +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); + +UPDATE VERSION SET SCHEMA_VERSION='2.3.0', VERSION_COMMENT='Hive release version 2.3.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 2.2.0 to 2.3.0' AS Status from dual; diff --git standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql new file mode 100644 index 0000000000..a923d92a06 --- /dev/null +++ standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql @@ -0,0 +1,158 @@ +SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual; + +--@041-HIVE-16556.oracle.sql; +CREATE TABLE METASTORE_DB_PROPERTIES +( + PROPERTY_KEY VARCHAR(255) NOT NULL, + PROPERTY_VALUE VARCHAR(1000) NOT NULL, + DESCRIPTION VARCHAR(1000) +); + +ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY); + +--@042-HIVE-16575.oracle.sql; +CREATE INDEX CONSTRAINTS_CT_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE); + +--@043-HIVE-16922.oracle.sql; +UPDATE SERDE_PARAMS +SET PARAM_KEY='collection.delim' +WHERE PARAM_KEY='colelction.delim'; + +--@044-HIVE-16997.oracle.sql; +ALTER TABLE PART_COL_STATS ADD BIT_VECTOR BLOB NULL; +ALTER TABLE TAB_COL_STATS ADD BIT_VECTOR BLOB NULL; + +--@045-HIVE-16886.oracle.sql; +INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 FROM DUAL WHERE NOT EXISTS ( SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE); + +--@046-HIVE-17566.oracle.sql; +CREATE TABLE WM_RESOURCEPLAN +( + RP_ID NUMBER NOT NULL, + "NAME" VARCHAR2(128) NOT NULL, + QUERY_PARALLELISM NUMBER(10), + STATUS VARCHAR2(20) NOT NULL, + DEFAULT_POOL_ID NUMBER +); + +ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME"); + + +CREATE TABLE WM_POOL +( + POOL_ID NUMBER NOT NULL, + RP_ID NUMBER NOT NULL, + PATH VARCHAR2(1024) NOT NULL, + ALLOC_FRACTION NUMBER, + QUERY_PARALLELISM NUMBER(10), + SCHEDULING_POLICY VARCHAR2(1024) +); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH); +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + + +CREATE TABLE WM_TRIGGER +( + TRIGGER_ID NUMBER NOT NULL, + RP_ID NUMBER NOT NULL, + "NAME" VARCHAR2(128) NOT NULL, + TRIGGER_EXPRESSION VARCHAR2(1024), + ACTION_EXPRESSION VARCHAR2(1024), + IS_IN_UNMANAGED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_IN_UNMANAGED IN (1,0)) +); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME"); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + + +CREATE TABLE WM_POOL_TO_TRIGGER +( + POOL_ID NUMBER NOT NULL, + TRIGGER_ID NUMBER NOT NULL +); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT 
WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID); + + +CREATE TABLE WM_MAPPING +( + MAPPING_ID NUMBER NOT NULL, + RP_ID NUMBER NOT NULL, + ENTITY_TYPE VARCHAR2(128) NOT NULL, + ENTITY_NAME VARCHAR2(128) NOT NULL, + POOL_ID NUMBER NOT NULL, + ORDERING NUMBER(10) +); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); + +-- 048-HIVE-14498 +CREATE TABLE MV_CREATION_METADATA +( + MV_CREATION_METADATA_ID NUMBER NOT NULL, + DB_NAME VARCHAR2(128) NOT NULL, + TBL_NAME VARCHAR2(256) NOT NULL, + TXN_LIST CLOB NULL +); + +ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID); + +CREATE UNIQUE INDEX UNIQUE_TABLE ON MV_CREATION_METADATA ("DB_NAME", "TBL_NAME"); + +CREATE TABLE MV_TABLES_USED +( + MV_CREATION_METADATA_ID NUMBER NOT NULL, + TBL_ID NUMBER NOT NULL +); + +ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK1 FOREIGN KEY (MV_CREATION_METADATA_ID) REFERENCES MV_CREATION_METADATA (MV_CREATION_METADATA_ID); + +ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK2 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID); + +ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_TIMESTAMP timestamp NULL; + +UPDATE COMPLETED_TXN_COMPONENTS SET CTC_TIMESTAMP = CURRENT_TIMESTAMP; + +ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY(CTC_TIMESTAMP DEFAULT CURRENT_TIMESTAMP); + +ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY(CTC_TIMESTAMP NOT NULL); + +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +-- 049-HIVE-18489 +UPDATE FUNC_RU + SET RESOURCE_URI = 's3a' || SUBSTR(RESOURCE_URI, 4) + WHERE RESOURCE_URI LIKE 's3n://%' ; + +UPDATE SKEWED_COL_VALUE_LOC_MAP + SET LOCATION = 's3a' || SUBSTR(LOCATION, 4) + WHERE LOCATION LIKE 's3n://%' ; + +UPDATE SDS + SET LOCATION = 's3a' || SUBSTR(LOCATION, 4) + WHERE LOCATION LIKE 's3n://%' ; + +UPDATE DBS + SET DB_LOCATION_URI = 's3a' || SUBSTR(DB_LOCATION_URI, 4) + WHERE DB_LOCATION_URI LIKE 's3n://%' ; + +UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual; diff --git standalone-metastore/src/main/sql/oracle/upgrade.order.oracle standalone-metastore/src/main/sql/oracle/upgrade.order.oracle new file mode 100644 index 0000000000..a18b062732 --- /dev/null +++ standalone-metastore/src/main/sql/oracle/upgrade.order.oracle @@ -0,0 +1,12 @@ +0.9.0-to-0.10.0 +0.10.0-to-0.11.0 +0.11.0-to-0.12.0 +0.12.0-to-0.13.0 +0.13.0-to-0.14.0 +0.14.0-to-1.1.0 +1.1.0-to-1.2.0 +1.2.0-to-2.0.0 +2.0.0-to-2.1.0 +2.1.0-to-2.2.0 +2.2.0-to-2.3.0 +2.3.0-to-3.0.0 diff --git standalone-metastore/src/main/sql/postgres/create-user.postgres.sql standalone-metastore/src/main/sql/postgres/create-user.postgres.sql new file mode 100644 index 0000000000..90e68dcdd2 --- /dev/null +++ standalone-metastore/src/main/sql/postgres/create-user.postgres.sql @@ -0,0 +1,2 @@
+CREATE ROLE _REPLACE_WITH_USER_ LOGIN PASSWORD '_REPLACE_WITH_PASSWD_'; +CREATE DATABASE _REPLACE_WITH_DB_ OWNER _REPLACE_WITH_USER_; diff --git standalone-metastore/src/main/sql/postgres/hive-schema-1.2.0.postgres.sql standalone-metastore/src/main/sql/postgres/hive-schema-1.2.0.postgres.sql new file mode 100644 index 0000000000..f19b9e52ff --- /dev/null +++ standalone-metastore/src/main/sql/postgres/hive-schema-1.2.0.postgres.sql @@ -0,0 +1,1562 @@ +-- +-- PostgreSQL database dump +-- + +SET statement_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = off; +SET check_function_bodies = false; +SET client_min_messages = warning; +SET escape_string_warning = off; + +SET search_path = public, pg_catalog; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "BUCKETING_COLS" ( + "SD_ID" bigint NOT NULL, + "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying, + "INTEGER_IDX" bigint NOT NULL +); + + +-- +-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "CDS" ( + "CD_ID" bigint NOT NULL +); + + +-- +-- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "COLUMNS_OLD" ( + "SD_ID" bigint NOT NULL, + "COMMENT" character varying(256) DEFAULT NULL::character varying, + "COLUMN_NAME" character varying(128) NOT NULL, + "TYPE_NAME" character varying(4000) NOT NULL, + "INTEGER_IDX" bigint NOT NULL +); + + +-- +-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "COLUMNS_V2" ( + "CD_ID" bigint NOT NULL, + "COMMENT" character varying(4000), + "COLUMN_NAME" character varying(128) NOT NULL, + "TYPE_NAME" character varying(4000), + "INTEGER_IDX" integer NOT NULL +); + + +-- +-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "DATABASE_PARAMS" ( + "DB_ID" bigint NOT NULL, + "PARAM_KEY" character varying(180) NOT NULL, + "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying +); + + +-- +-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "DBS" ( + "DB_ID" bigint NOT NULL, + "DESC" character varying(4000) DEFAULT NULL::character varying, + "DB_LOCATION_URI" character varying(4000) NOT NULL, + "NAME" character varying(128) DEFAULT NULL::character varying, + "OWNER_NAME" character varying(128) DEFAULT NULL::character varying, + "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying +); + + +-- +-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "DB_PRIVS" ( + "DB_GRANT_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "DB_ID" bigint, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "DB_PRIV" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "GLOBAL_PRIVS" ( + "USER_GRANT_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character 
varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "USER_PRIV" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "IDXS" ( + "INDEX_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "DEFERRED_REBUILD" boolean NOT NULL, + "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying, + "INDEX_NAME" character varying(128) DEFAULT NULL::character varying, + "INDEX_TBL_ID" bigint, + "LAST_ACCESS_TIME" bigint NOT NULL, + "ORIG_TBL_ID" bigint, + "SD_ID" bigint +); + + +-- +-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "INDEX_PARAMS" ( + "INDEX_ID" bigint NOT NULL, + "PARAM_KEY" character varying(256) NOT NULL, + "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying +); + + +-- +-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "NUCLEUS_TABLES" ( + "CLASS_NAME" character varying(128) NOT NULL, + "TABLE_NAME" character varying(128) NOT NULL, + "TYPE" character varying(4) NOT NULL, + "OWNER" character varying(2) NOT NULL, + "VERSION" character varying(20) NOT NULL, + "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying +); + + +-- +-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PARTITIONS" ( + "PART_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "LAST_ACCESS_TIME" bigint NOT NULL, + "PART_NAME" character varying(767) DEFAULT NULL::character varying, + "SD_ID" bigint, + "TBL_ID" bigint +); + + +-- +-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PARTITION_EVENTS" ( + "PART_NAME_ID" bigint NOT NULL, + "DB_NAME" character varying(128), + "EVENT_TIME" bigint NOT NULL, + "EVENT_TYPE" integer NOT NULL, + "PARTITION_NAME" character varying(767), + "TBL_NAME" character varying(128) +); + + +-- +-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PARTITION_KEYS" ( + "TBL_ID" bigint NOT NULL, + "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying, + "PKEY_NAME" character varying(128) NOT NULL, + "PKEY_TYPE" character varying(767) NOT NULL, + "INTEGER_IDX" bigint NOT NULL +); + + +-- +-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PARTITION_KEY_VALS" ( + "PART_ID" bigint NOT NULL, + "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying, + "INTEGER_IDX" bigint NOT NULL +); + + +-- +-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PARTITION_PARAMS" ( + "PART_ID" bigint NOT NULL, + "PARAM_KEY" character varying(256) NOT NULL, + "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying +); + + +-- +-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PART_COL_PRIVS" ( + "PART_COLUMN_GRANT_ID" bigint NOT NULL, + "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying, + "CREATE_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_ID" bigint, + 
"PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PART_PRIVS" ( + "PART_GRANT_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_ID" bigint, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_PRIV" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "ROLES" ( + "ROLE_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "OWNER_NAME" character varying(128) DEFAULT NULL::character varying, + "ROLE_NAME" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "ROLE_MAP" ( + "ROLE_GRANT_ID" bigint NOT NULL, + "ADD_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "ROLE_ID" bigint +); + + +-- +-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SDS" ( + "SD_ID" bigint NOT NULL, + "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying, + "IS_COMPRESSED" boolean NOT NULL, + "LOCATION" character varying(4000) DEFAULT NULL::character varying, + "NUM_BUCKETS" bigint NOT NULL, + "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying, + "SERDE_ID" bigint, + "CD_ID" bigint, + "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL +); + + +-- +-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SD_PARAMS" ( + "SD_ID" bigint NOT NULL, + "PARAM_KEY" character varying(256) NOT NULL, + "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying +); + + +-- +-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SEQUENCE_TABLE" ( + "SEQUENCE_NAME" character varying(255) NOT NULL, + "NEXT_VAL" bigint NOT NULL +); + + +-- +-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SERDES" ( + "SERDE_ID" bigint NOT NULL, + "NAME" character varying(128) DEFAULT NULL::character varying, + "SLIB" character varying(4000) DEFAULT NULL::character varying +); + + +-- +-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SERDE_PARAMS" ( + "SERDE_ID" bigint NOT NULL, + "PARAM_KEY" character varying(256) NOT NULL, + "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying +); + + +-- +-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SORT_COLS" ( + "SD_ID" bigint NOT NULL, + "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying, + "ORDER" bigint NOT NULL, + "INTEGER_IDX" bigint NOT NULL +); + + +-- +-- Name: 
TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TABLE_PARAMS" ( + "TBL_ID" bigint NOT NULL, + "PARAM_KEY" character varying(256) NOT NULL, + "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying +); + + +-- +-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TBLS" ( + "TBL_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "DB_ID" bigint, + "LAST_ACCESS_TIME" bigint NOT NULL, + "OWNER" character varying(767) DEFAULT NULL::character varying, + "RETENTION" bigint NOT NULL, + "SD_ID" bigint, + "TBL_NAME" character varying(128) DEFAULT NULL::character varying, + "TBL_TYPE" character varying(128) DEFAULT NULL::character varying, + "VIEW_EXPANDED_TEXT" text, + "VIEW_ORIGINAL_TEXT" text +); + + +-- +-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TBL_COL_PRIVS" ( + "TBL_COLUMN_GRANT_ID" bigint NOT NULL, + "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying, + "CREATE_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying, + "TBL_ID" bigint +); + + +-- +-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TBL_PRIVS" ( + "TBL_GRANT_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "TBL_PRIV" character varying(128) DEFAULT NULL::character varying, + "TBL_ID" bigint +); + + +-- +-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TYPES" ( + "TYPES_ID" bigint NOT NULL, + "TYPE_NAME" character varying(128) DEFAULT NULL::character varying, + "TYPE1" character varying(767) DEFAULT NULL::character varying, + "TYPE2" character varying(767) DEFAULT NULL::character varying +); + + +-- +-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TYPE_FIELDS" ( + "TYPE_NAME" bigint NOT NULL, + "COMMENT" character varying(256) DEFAULT NULL::character varying, + "FIELD_NAME" character varying(128) NOT NULL, + "FIELD_TYPE" character varying(767) NOT NULL, + "INTEGER_IDX" bigint NOT NULL +); + +CREATE TABLE "SKEWED_STRING_LIST" ( + "STRING_LIST_ID" bigint NOT NULL +); + +CREATE TABLE "SKEWED_STRING_LIST_VALUES" ( + "STRING_LIST_ID" bigint NOT NULL, + "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying, + "INTEGER_IDX" bigint NOT NULL +); + +CREATE TABLE "SKEWED_COL_NAMES" ( + "SD_ID" bigint NOT NULL, + "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying, + "INTEGER_IDX" bigint NOT NULL +); + +CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" ( + "SD_ID" bigint NOT NULL, + "STRING_LIST_ID_KID" bigint NOT NULL, + "LOCATION" character varying(4000) DEFAULT NULL::character varying +); + +CREATE TABLE "SKEWED_VALUES" ( + "SD_ID_OID" bigint NOT NULL, + "STRING_LIST_ID_EID" bigint NOT NULL, + 
"INTEGER_IDX" bigint NOT NULL +); + + +CREATE TABLE "MASTER_KEYS" +( + "KEY_ID" SERIAL, + "MASTER_KEY" varchar(767) NULL, + PRIMARY KEY ("KEY_ID") +); + +CREATE TABLE "DELEGATION_TOKENS" +( + "TOKEN_IDENT" varchar(767) NOT NULL, + "TOKEN" varchar(767) NULL, + PRIMARY KEY ("TOKEN_IDENT") +); + +-- +-- Name: TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TAB_COL_STATS" ( + "CS_ID" bigint NOT NULL, + "DB_NAME" character varying(128) DEFAULT NULL::character varying, + "TABLE_NAME" character varying(128) DEFAULT NULL::character varying, + "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying, + "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying, + "TBL_ID" bigint NOT NULL, + "LONG_LOW_VALUE" bigint, + "LONG_HIGH_VALUE" bigint, + "DOUBLE_LOW_VALUE" double precision, + "DOUBLE_HIGH_VALUE" double precision, + "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying, + "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying, + "NUM_NULLS" bigint NOT NULL, + "NUM_DISTINCTS" bigint, + "AVG_COL_LEN" double precision, + "MAX_COL_LEN" bigint, + "NUM_TRUES" bigint, + "NUM_FALSES" bigint, + "LAST_ANALYZED" bigint NOT NULL +); + +-- +-- Table structure for VERSION +-- +CREATE TABLE "VERSION" ( + "VER_ID" bigint, + "SCHEMA_VERSION" character varying(127) NOT NULL, + "VERSION_COMMENT" character varying(255) NOT NULL +); + +-- +-- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PART_COL_STATS" ( + "CS_ID" bigint NOT NULL, + "DB_NAME" character varying(128) DEFAULT NULL::character varying, + "TABLE_NAME" character varying(128) DEFAULT NULL::character varying, + "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying, + "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying, + "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_ID" bigint NOT NULL, + "LONG_LOW_VALUE" bigint, + "LONG_HIGH_VALUE" bigint, + "DOUBLE_LOW_VALUE" double precision, + "DOUBLE_HIGH_VALUE" double precision, + "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying, + "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying, + "NUM_NULLS" bigint NOT NULL, + "NUM_DISTINCTS" bigint, + "AVG_COL_LEN" double precision, + "MAX_COL_LEN" bigint, + "NUM_TRUES" bigint, + "NUM_FALSES" bigint, + "LAST_ANALYZED" bigint NOT NULL +); + +-- +-- Table structure for FUNCS +-- +CREATE TABLE "FUNCS" ( + "FUNC_ID" BIGINT NOT NULL, + "CLASS_NAME" VARCHAR(4000), + "CREATE_TIME" INTEGER NOT NULL, + "DB_ID" BIGINT, + "FUNC_NAME" VARCHAR(128), + "FUNC_TYPE" INTEGER NOT NULL, + "OWNER_NAME" VARCHAR(128), + "OWNER_TYPE" VARCHAR(10), + PRIMARY KEY ("FUNC_ID") +); + +-- +-- Table structure for FUNC_RU +-- +CREATE TABLE "FUNC_RU" ( + "FUNC_ID" BIGINT NOT NULL, + "RESOURCE_TYPE" INTEGER NOT NULL, + "RESOURCE_URI" VARCHAR(4000), + "INTEGER_IDX" INTEGER NOT NULL, + PRIMARY KEY ("FUNC_ID", "INTEGER_IDX") +); + +CREATE TABLE "NOTIFICATION_LOG" +( + "NL_ID" BIGINT NOT NULL, + "EVENT_ID" BIGINT NOT NULL, + "EVENT_TIME" INTEGER NOT NULL, + "EVENT_TYPE" VARCHAR(32) NOT NULL, + "DB_NAME" VARCHAR(128), + "TBL_NAME" VARCHAR(128), + "MESSAGE" text, + PRIMARY KEY ("NL_ID") +); + +CREATE TABLE "NOTIFICATION_SEQUENCE" +( + "NNI_ID" BIGINT NOT NULL, + "NEXT_EVENT_ID" BIGINT NOT NULL, + PRIMARY KEY ("NNI_ID") +); + +-- +-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser;
Tablespace: +-- + +ALTER TABLE ONLY "BUCKETING_COLS" + ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + + +-- +-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "CDS" + ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID"); + + +-- +-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "COLUMNS_V2" + ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME"); + + +-- +-- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "COLUMNS_OLD" + ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME"); + + +-- +-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DATABASE_PARAMS" + ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY"); + + +-- +-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DB_PRIVS" + ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DBS" + ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID"); + + +-- +-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DB_PRIVS" + ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID"); + + +-- +-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "GLOBAL_PRIVS" + ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "GLOBAL_PRIVS" + ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID"); + + +-- +-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "IDXS" + ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID"); + + +-- +-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "INDEX_PARAMS" + ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY"); + + +-- +-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "NUCLEUS_TABLES" + ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME"); + + +-- +-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITIONS" + ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID"); + + +-- +-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITION_EVENTS" + ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID"); + + +-- +-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITION_KEYS" + ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME"); + + +-- +-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITION_KEY_VALS" + ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX"); + + +-- +-- Name: PARTITION_PARAMS_pkey; Type: 
CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITION_PARAMS" + ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY"); + + +-- +-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PART_COL_PRIVS" + ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID"); + + +-- +-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PART_PRIVS" + ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID"); + + +-- +-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "ROLES" + ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME"); + + +-- +-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "ROLES" + ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID"); + + +-- +-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "ROLE_MAP" + ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID"); + + +-- +-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SDS" + ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID"); + + +-- +-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SD_PARAMS" + ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY"); + + +-- +-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SEQUENCE_TABLE" + ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME"); + + +-- +-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SERDES" + ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID"); + + +-- +-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SERDE_PARAMS" + ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY"); + + +-- +-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SORT_COLS" + ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + + +-- +-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TABLE_PARAMS" + ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY"); + + +-- +-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TBLS" + ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID"); + + +-- +-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TBL_COL_PRIVS" + ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID"); + + +-- +-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TBL_PRIVS" + ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID"); + + +-- +-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TYPES" + ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID"); + + +-- +-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TYPE_FIELDS" + ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME"); + 
+ALTER TABLE ONLY "SKEWED_STRING_LIST" + ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID"); + +ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES" + ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX"); + + +ALTER TABLE ONLY "SKEWED_COL_NAMES" + ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + +ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP" + ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID"); + +ALTER TABLE ONLY "SKEWED_VALUES" + ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX"); + +-- +-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- +ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID"); + +-- +-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- +ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID"); + +-- +-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "IDXS" + ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID"); + + +-- +-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITIONS" + ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID"); + + +-- +-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TBLS" + ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID"); + + +-- +-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DBS" + ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME"); + + +-- +-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TYPES" + ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME"); + + +-- +-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "ROLE_MAP" + ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID"); + + +-- +-- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID"); + + +-- +-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID"); + + +-- +-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID"); + + +-- +-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID"); + + +-- +-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID"); + + +-- +-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID"); + + +-- +-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID"); + + +-- +-- Name: 
PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME"); + + +-- +-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID"); + + +-- +-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID"); + + +-- +-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID"); + + +-- +-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID"); + + +-- +-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID"); + + +-- +-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID"); + + +-- +-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID"); + + +-- +-- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); + + +-- +-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID"); + + +-- +-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID"); + + +-- +-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID"); + + +-- +-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID"); + + +-- +-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID"); + + +-- +-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", 
"GRANTOR_TYPE"); + + +-- +-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID"); + + +-- +-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID"); + + +-- +-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID"); + + +-- +-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID"); + + +-- +-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID"); + + +-- +-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME"); + +-- +-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID"); + +-- +-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID"); + +-- +-- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID"); + +-- +-- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID"); + +-- +-- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID"); + + +ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES" + ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE; + + +ALTER TABLE ONLY "SKEWED_COL_NAMES" + ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP" + ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + +ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP" + ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE; + +ALTER TABLE ONLY "SKEWED_VALUES" + ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE; + +ALTER TABLE ONLY "SKEWED_VALUES" + ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "BUCKETING_COLS" + ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "COLUMNS_OLD" + ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "COLUMNS_V2" + ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE; + + +-- +-- Name: 
DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "DATABASE_PARAMS" + ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE; + + +-- +-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "DB_PRIVS" + ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE; + + +-- +-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "IDXS" + ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "IDXS" + ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "IDXS" + ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "INDEX_PARAMS" + ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE; + + +-- +-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PARTITIONS" + ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PARTITIONS" + ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PARTITION_KEYS" + ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PARTITION_KEY_VALS" + ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; + + +-- +-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PARTITION_PARAMS" + ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; + + +-- +-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PART_COL_PRIVS" + ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; + + +-- +-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PART_PRIVS" + ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; + + +-- +-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "ROLE_MAP" + ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE; + + +-- +-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "SDS" + ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY 
("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE; + + +-- +-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "SDS" + ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE; + + +-- +-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "SD_PARAMS" + ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "SERDE_PARAMS" + ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE; + + +-- +-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "SORT_COLS" + ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TABLE_PARAMS" + ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TBLS" + ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE; + + +-- +-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TBLS" + ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TBL_COL_PRIVS" + ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TBL_PRIVS" + ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TYPE_FIELDS" + ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE; + +-- +-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- +ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- +ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; + + +ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID"); + +-- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +ALTER TABLE ONLY "FUNCS" + ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE; + +-- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +ALTER TABLE ONLY "FUNC_RU" + ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE; + +-- +-- Name: public; Type: ACL; Schema: -; Owner: hiveuser +-- + +REVOKE ALL ON SCHEMA public FROM PUBLIC; +GRANT ALL ON SCHEMA public TO PUBLIC; + +-- +-- PostgreSQL database dump complete +-- + 
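-- (Illustrative aside, not part of the patch: nearly every foreign key in the
-- dump above is declared DEFERRABLE, which lets a client such as the
-- metastore's ORM layer write parent and child rows in either order inside
-- one transaction and have the checks run only at commit. A minimal,
-- self-contained sketch of that behavior; the *_demo tables are hypothetical
-- and not part of the metastore schema.)
CREATE TABLE parent_demo (id bigint PRIMARY KEY);
CREATE TABLE child_demo (
    id bigint PRIMARY KEY,
    parent_id bigint REFERENCES parent_demo (id) DEFERRABLE
);

BEGIN;
SET CONSTRAINTS ALL DEFERRED;
INSERT INTO child_demo VALUES (1, 10);  -- orphan for now; the check is deferred
INSERT INTO parent_demo VALUES (10);
COMMIT;  -- the deferred foreign-key check runs here and passes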
+------------------------------ +-- Transaction and lock tables +------------------------------ +CREATE TABLE TXNS ( + TXN_ID bigint PRIMARY KEY, + TXN_STATE char(1) NOT NULL, + TXN_STARTED bigint NOT NULL, + TXN_LAST_HEARTBEAT bigint NOT NULL, + TXN_USER varchar(128) NOT NULL, + TXN_HOST varchar(128) NOT NULL +); + +CREATE TABLE TXN_COMPONENTS ( + TC_TXNID bigint REFERENCES TXNS (TXN_ID), + TC_DATABASE varchar(128) NOT NULL, + TC_TABLE varchar(128), + TC_PARTITION varchar(767) DEFAULT NULL +); + +CREATE TABLE COMPLETED_TXN_COMPONENTS ( + CTC_TXNID bigint, + CTC_DATABASE varchar(128) NOT NULL, + CTC_TABLE varchar(128), + CTC_PARTITION varchar(767) +); + +CREATE TABLE NEXT_TXN_ID ( + NTXN_NEXT bigint NOT NULL +); +INSERT INTO NEXT_TXN_ID VALUES(1); + +CREATE TABLE HIVE_LOCKS ( + HL_LOCK_EXT_ID bigint NOT NULL, + HL_LOCK_INT_ID bigint NOT NULL, + HL_TXNID bigint, + HL_DB varchar(128) NOT NULL, + HL_TABLE varchar(128), + HL_PARTITION varchar(767) DEFAULT NULL, + HL_LOCK_STATE char(1) NOT NULL, + HL_LOCK_TYPE char(1) NOT NULL, + HL_LAST_HEARTBEAT bigint NOT NULL, + HL_ACQUIRED_AT bigint, + HL_USER varchar(128) NOT NULL, + HL_HOST varchar(128) NOT NULL, + PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID) +); + +CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID); + +CREATE TABLE NEXT_LOCK_ID ( + NL_NEXT bigint NOT NULL +); +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE COMPACTION_QUEUE ( + CQ_ID bigint PRIMARY KEY, + CQ_DATABASE varchar(128) NOT NULL, + CQ_TABLE varchar(128) NOT NULL, + CQ_PARTITION varchar(767), + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_WORKER_ID varchar(128), + CQ_START bigint, + CQ_RUN_AS varchar(128) +); + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( + NCQ_NEXT bigint NOT NULL +); +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + + + + +-- ----------------------------------------------------------------- +-- Record schema version. 
Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '1.2.0', 'Hive release version 1.2.0'); diff --git metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql similarity index 93% rename from metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql rename to standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql index af71ed3bbe..9d63056376 100644 --- metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql +++ standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql @@ -1611,7 +1611,123 @@ GRANT ALL ON SCHEMA public TO PUBLIC; ------------------------------ -- Transaction and lock tables ------------------------------ -\i hive-txn-schema-3.0.0.postgres.sql; +CREATE TABLE TXNS ( + TXN_ID bigint PRIMARY KEY, + TXN_STATE char(1) NOT NULL, + TXN_STARTED bigint NOT NULL, + TXN_LAST_HEARTBEAT bigint NOT NULL, + TXN_USER varchar(128) NOT NULL, + TXN_HOST varchar(128) NOT NULL, + TXN_AGENT_INFO varchar(128), + TXN_META_INFO varchar(128), + TXN_HEARTBEAT_COUNT integer +); + +CREATE TABLE TXN_COMPONENTS ( + TC_TXNID bigint REFERENCES TXNS (TXN_ID), + TC_DATABASE varchar(128) NOT NULL, + TC_TABLE varchar(128), + TC_PARTITION varchar(767) DEFAULT NULL, + TC_OPERATION_TYPE char(1) NOT NULL +); + +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS USING hash (TC_TXNID); + +CREATE TABLE COMPLETED_TXN_COMPONENTS ( + CTC_TXNID bigint, + CTC_DATABASE varchar(128) NOT NULL, + CTC_TABLE varchar(256), + CTC_PARTITION varchar(767), + CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL +); + +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS USING btree (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +CREATE TABLE NEXT_TXN_ID ( + NTXN_NEXT bigint NOT NULL +); +INSERT INTO NEXT_TXN_ID VALUES(1); + +CREATE TABLE HIVE_LOCKS ( + HL_LOCK_EXT_ID bigint NOT NULL, + HL_LOCK_INT_ID bigint NOT NULL, + HL_TXNID bigint, + HL_DB varchar(128) NOT NULL, + HL_TABLE varchar(128), + HL_PARTITION varchar(767) DEFAULT NULL, + HL_LOCK_STATE char(1) NOT NULL, + HL_LOCK_TYPE char(1) NOT NULL, + HL_LAST_HEARTBEAT bigint NOT NULL, + HL_ACQUIRED_AT bigint, + HL_USER varchar(128) NOT NULL, + HL_HOST varchar(128) NOT NULL, + HL_HEARTBEAT_COUNT integer, + HL_AGENT_INFO varchar(128), + HL_BLOCKEDBY_EXT_ID bigint, + HL_BLOCKEDBY_INT_ID bigint, + PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID) +); + +CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID); + +CREATE TABLE NEXT_LOCK_ID ( + NL_NEXT bigint NOT NULL +); +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE COMPACTION_QUEUE ( + CQ_ID bigint PRIMARY KEY, + CQ_DATABASE varchar(128) NOT NULL, + CQ_TABLE varchar(128) NOT NULL, + CQ_PARTITION varchar(767), + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_TBLPROPERTIES varchar(2048), + CQ_WORKER_ID varchar(128), + CQ_START bigint, + CQ_RUN_AS varchar(128), + CQ_HIGHEST_TXN_ID bigint, + CQ_META_INFO bytea, + CQ_HADOOP_JOB_ID varchar(32) +); + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( + NCQ_NEXT bigint NOT NULL +); +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID bigint PRIMARY KEY, + CC_DATABASE varchar(128) NOT NULL, + CC_TABLE varchar(128) NOT NULL, + CC_PARTITION varchar(767), + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_TBLPROPERTIES varchar(2048), + 
CC_WORKER_ID varchar(128), + CC_START bigint, + CC_END bigint, + CC_RUN_AS varchar(128), + CC_HIGHEST_TXN_ID bigint, + CC_META_INFO bytea, + CC_HADOOP_JOB_ID varchar(32) +); + +CREATE TABLE AUX_TABLE ( + MT_KEY1 varchar(128) NOT NULL, + MT_KEY2 bigint NOT NULL, + MT_COMMENT varchar(255), + PRIMARY KEY(MT_KEY1, MT_KEY2) +); + +CREATE TABLE WRITE_SET ( + WS_DATABASE varchar(128) NOT NULL, + WS_TABLE varchar(128) NOT NULL, + WS_PARTITION varchar(767), + WS_TXNID bigint NOT NULL, + WS_COMMIT_ID bigint NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +); -- ----------------------------------------------------------------- -- Record schema version. Should be the last step in the init script diff --git standalone-metastore/src/main/sql/postgres/upgrade-1.2.0-to-2.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/upgrade-1.2.0-to-2.0.0.postgres.sql new file mode 100644 index 0000000000..984f0a60a4 --- /dev/null +++ standalone-metastore/src/main/sql/postgres/upgrade-1.2.0-to-2.0.0.postgres.sql @@ -0,0 +1,73 @@ +SELECT 'Upgrading MetaStore schema from 1.2.0 to 2.0.0'; + +--\i 021-HIVE-11970.postgres.sql; +ALTER TABLE "COLUMNS_V2" ALTER "COLUMN_NAME" TYPE character varying(1000); +ALTER TABLE "PART_COL_PRIVS" ALTER "COLUMN_NAME" TYPE character varying(1000); +ALTER TABLE "TBL_COL_PRIVS" ALTER "COLUMN_NAME" TYPE character varying(1000); +ALTER TABLE "SORT_COLS" ALTER "COLUMN_NAME" TYPE character varying(1000); +ALTER TABLE "TAB_COL_STATS" ALTER "COLUMN_NAME" TYPE character varying(1000); +ALTER TABLE "PART_COL_STATS" ALTER "COLUMN_NAME" TYPE character varying(1000); + +--\i 022-HIVE-12807.postgres.sql; +ALTER TABLE COMPACTION_QUEUE ADD COLUMN CQ_HIGHEST_TXN_ID bigint; + +--\i 023-HIVE-12814.postgres.sql; +ALTER TABLE COMPACTION_QUEUE ADD COLUMN CQ_META_INFO bytea; + +--\i 024-HIVE-12816.postgres.sql; +ALTER TABLE COMPACTION_QUEUE ADD COLUMN CQ_HADOOP_JOB_ID varchar(32); + +--\i 025-HIVE-12818.postgres.sql; +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID bigint PRIMARY KEY, + CC_DATABASE varchar(128) NOT NULL, + CC_TABLE varchar(128) NOT NULL, + CC_PARTITION varchar(767), + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_WORKER_ID varchar(128), + CC_START bigint, + CC_END bigint, + CC_RUN_AS varchar(128), + CC_HIGHEST_TXN_ID bigint, + CC_META_INFO bytea, + CC_HADOOP_JOB_ID varchar(32) +); + + + +--\i 026-HIVE-12819.postgres.sql; +ALTER TABLE TXNS ADD COLUMN TXN_AGENT_INFO varchar(128); + +--\i 027-HIVE-12821.postgres.sql; +ALTER TABLE TXNS ADD COLUMN TXN_HEARTBEAT_COUNT integer; +ALTER TABLE HIVE_LOCKS ADD COLUMN HL_HEARTBEAT_COUNT integer; + +--\i 028-HIVE-12822.postgres.sql; +ALTER TABLE TXNS ADD COLUMN TXN_META_INFO varchar(128); + +--\i 029-HIVE-12823.postgres.sql; +ALTER TABLE HIVE_LOCKS ADD COLUMN HL_AGENT_INFO varchar(128); + +--\i 030-HIVE-12831.postgres.sql; +ALTER TABLE HIVE_LOCKS ADD COLUMN HL_BLOCKEDBY_EXT_ID bigint; +ALTER TABLE HIVE_LOCKS ADD COLUMN HL_BLOCKEDBY_INT_ID bigint; + +--\i 031-HIVE-12832.postgres.sql; +CREATE TABLE AUX_TABLE ( + MT_KEY1 varchar(128) NOT NULL, + MT_KEY2 bigint NOT NULL, + MT_COMMENT varchar(255), + PRIMARY KEY(MT_KEY1, MT_KEY2) +); + + +UPDATE "VERSION" SET "SCHEMA_VERSION"='2.0.0', "VERSION_COMMENT"='Hive release version 2.0.0' where "VER_ID"=1; +SELECT 'Finished upgrading MetaStore schema from 1.2.0 to 2.0.0'; + + +-- +-- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + + diff --git standalone-metastore/src/main/sql/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql 
standalone-metastore/src/main/sql/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql new file mode 100644 index 0000000000..4c23e8abc1 --- /dev/null +++ standalone-metastore/src/main/sql/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql @@ -0,0 +1,40 @@ +SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0'; + +--\i 033-HIVE-13076.postgres.sql; +CREATE TABLE "KEY_CONSTRAINTS" +( + "CHILD_CD_ID" BIGINT, + "CHILD_INTEGER_IDX" BIGINT, + "CHILD_TBL_ID" BIGINT, + "PARENT_CD_ID" BIGINT NOT NULL, + "PARENT_INTEGER_IDX" BIGINT NOT NULL, + "PARENT_TBL_ID" BIGINT NOT NULL, + "POSITION" BIGINT NOT NULL, + "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, + "CONSTRAINT_TYPE" SMALLINT NOT NULL, + "UPDATE_RULE" SMALLINT, + "DELETE_RULE" SMALLINT, + "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL, + PRIMARY KEY ("CONSTRAINT_NAME", "POSITION") +) ; +CREATE INDEX "CONSTRAINTS_PARENT_TBLID_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("PARENT_TBL_ID"); + +--\i 034-HIVE-13395.postgres.sql; +CREATE TABLE WRITE_SET ( + WS_DATABASE varchar(128) NOT NULL, + WS_TABLE varchar(128) NOT NULL, + WS_PARTITION varchar(767), + WS_TXNID bigint NOT NULL, + WS_COMMIT_ID bigint NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +); + +ALTER TABLE TXN_COMPONENTS ADD TC_OPERATION_TYPE char(1); + +--\i 035-HIVE-13354.postgres.sql; +ALTER TABLE COMPACTION_QUEUE ADD CQ_TBLPROPERTIES varchar(2048); +ALTER TABLE COMPLETED_COMPACTIONS ADD CC_TBLPROPERTIES varchar(2048); + +UPDATE "VERSION" SET "SCHEMA_VERSION"='2.1.0', "VERSION_COMMENT"='Hive release version 2.1.0' where "VER_ID"=1; +SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0'; + diff --git standalone-metastore/src/main/sql/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql standalone-metastore/src/main/sql/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql new file mode 100644 index 0000000000..2fc1602b43 --- /dev/null +++ standalone-metastore/src/main/sql/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql @@ -0,0 +1,39 @@ +SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0'; + +--\i 036-HIVE-14496.postgres.sql; +-- Step 1: Add the column allowing null +ALTER TABLE "TBLS" ADD COLUMN "IS_REWRITE_ENABLED" boolean NULL; + + -- Step 2: Replace the null with default value (false) +UPDATE "TBLS" SET "IS_REWRITE_ENABLED" = false; + +-- Step 3: Alter the column to disallow null values +ALTER TABLE "TBLS" ALTER COLUMN "IS_REWRITE_ENABLED" SET NOT NULL; +ALTER TABLE "TBLS" ALTER COLUMN "IS_REWRITE_ENABLED" SET DEFAULT false; + +--\i 037-HIVE-10562.postgres.sql; +ALTER TABLE "NOTIFICATION_LOG" ADD COLUMN "MESSAGE_FORMAT" VARCHAR(16) NULL; + +--\i 038-HIVE-12274.postgres.sql; +alter table "SERDE_PARAMS" alter column "PARAM_VALUE" type text using cast("PARAM_VALUE" as text); +alter table "TABLE_PARAMS" alter column "PARAM_VALUE" type text using cast("PARAM_VALUE" as text); +alter table "SD_PARAMS" alter column "PARAM_VALUE" type text using cast("PARAM_VALUE" as text); +alter table "COLUMNS_V2" alter column "TYPE_NAME" type text using cast("TYPE_NAME" as text); + +alter table "TBLS" ALTER COLUMN "TBL_NAME" TYPE varchar(256); +alter table "NOTIFICATION_LOG" alter column "TBL_NAME" TYPE varchar(256); +alter table "PARTITION_EVENTS" alter column "TBL_NAME" TYPE varchar(256); +alter table "TAB_COL_STATS" alter column "TABLE_NAME" TYPE varchar(256); +alter table "PART_COL_STATS" alter column "TABLE_NAME" TYPE varchar(256); +alter table COMPLETED_TXN_COMPONENTS alter column CTC_TABLE TYPE varchar(256); + +alter table "COLUMNS_V2" alter column "COLUMN_NAME" TYPE varchar(767); +alter table "PART_COL_PRIVS" alter column 
"COLUMN_NAME" TYPE varchar(767); +alter table "TBL_COL_PRIVS" alter column "COLUMN_NAME" TYPE varchar(767); +alter table "SORT_COLS" alter column "COLUMN_NAME" TYPE varchar(767); +alter table "TAB_COL_STATS" alter column "COLUMN_NAME" TYPE varchar(767); +alter table "PART_COL_STATS" alter column "COLUMN_NAME" TYPE varchar(767); + +UPDATE "VERSION" SET "SCHEMA_VERSION"='2.2.0', "VERSION_COMMENT"='Hive release version 2.2.0' where "VER_ID"=1; +SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0'; + diff --git standalone-metastore/src/main/sql/postgres/upgrade-2.2.0-to-2.3.0.postgres.sql standalone-metastore/src/main/sql/postgres/upgrade-2.2.0-to-2.3.0.postgres.sql new file mode 100644 index 0000000000..c54e51834b --- /dev/null +++ standalone-metastore/src/main/sql/postgres/upgrade-2.2.0-to-2.3.0.postgres.sql @@ -0,0 +1,8 @@ +SELECT 'Upgrading MetaStore schema from 2.2.0 to 2.3.0'; + +--\i 039-HIVE-16399.postgres.sql; +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS USING hash (TC_TXNID); + +UPDATE "VERSION" SET "SCHEMA_VERSION"='2.3.0', "VERSION_COMMENT"='Hive release version 2.3.0' where "VER_ID"=1; +SELECT 'Finished upgrading MetaStore schema from 2.2.0 to 2.3.0'; + diff --git standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql new file mode 100644 index 0000000000..eb45cd24a6 --- /dev/null +++ standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql @@ -0,0 +1,174 @@ +SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0'; + +--\i 040-HIVE-16556.postgres.sql; +CREATE TABLE "METASTORE_DB_PROPERTIES" +( + "PROPERTY_KEY" VARCHAR(255) NOT NULL, + "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, + "DESCRIPTION" VARCHAR(1000) +); + +ALTER TABLE ONLY "METASTORE_DB_PROPERTIES" + ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY"); + +--\i 041-HIVE-16575.postgres.sql; +CREATE INDEX "CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("CONSTRAINT_TYPE"); + +--\i 042-HIVE-16922.postgres.sql; +UPDATE "SERDE_PARAMS" +SET "PARAM_KEY"='collection.delim' +WHERE "PARAM_KEY"='colelction.delim'; + +--\i 043-HIVE-16997.postgres.sql; +ALTER TABLE "PART_COL_STATS" ADD COLUMN "BIT_VECTOR" BYTEA; +ALTER TABLE "TAB_COL_STATS" ADD COLUMN "BIT_VECTOR" BYTEA; + +--\i 044-HIVE-16886.postgres.sql; +INSERT INTO "NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT 1,1 WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "NOTIFICATION_SEQUENCE"); + +--\i 045-HIVE-17566.postgres.sql; +CREATE TABLE "WM_RESOURCEPLAN" ( + "RP_ID" bigint NOT NULL, + "NAME" character varying(128) NOT NULL, + "QUERY_PARALLELISM" integer, + "STATUS" character varying(20) NOT NULL, + "DEFAULT_POOL_ID" bigint +); + +ALTER TABLE ONLY "WM_RESOURCEPLAN" + ADD CONSTRAINT "WM_RESOURCEPLAN_pkey" PRIMARY KEY ("RP_ID"); + +ALTER TABLE ONLY "WM_RESOURCEPLAN" + ADD CONSTRAINT "UNIQUE_WM_RESOURCEPLAN" UNIQUE ("NAME"); + + +CREATE TABLE "WM_POOL" ( + "POOL_ID" bigint NOT NULL, + "RP_ID" bigint NOT NULL, + "PATH" character varying(1024) NOT NULL, + "ALLOC_FRACTION" double precision, + "QUERY_PARALLELISM" integer, + "SCHEDULING_POLICY" character varying(1024) +); + +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "WM_POOL_pkey" PRIMARY KEY ("POOL_ID"); + +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "UNIQUE_WM_POOL" UNIQUE ("RP_ID", "PATH"); + +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; +ALTER TABLE ONLY "WM_RESOURCEPLAN" + 
ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; + + +CREATE TABLE "WM_TRIGGER" ( + "TRIGGER_ID" bigint NOT NULL, + "RP_ID" bigint NOT NULL, + "NAME" character varying(128) NOT NULL, + "TRIGGER_EXPRESSION" character varying(1024) DEFAULT NULL::character varying, + "ACTION_EXPRESSION" character varying(1024) DEFAULT NULL::character varying, + "IS_IN_UNMANAGED" boolean NOT NULL DEFAULT false +); + +ALTER TABLE ONLY "WM_TRIGGER" + ADD CONSTRAINT "WM_TRIGGER_pkey" PRIMARY KEY ("TRIGGER_ID"); + +ALTER TABLE ONLY "WM_TRIGGER" + ADD CONSTRAINT "UNIQUE_WM_TRIGGER" UNIQUE ("RP_ID", "NAME"); + +ALTER TABLE ONLY "WM_TRIGGER" + ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; + + +CREATE TABLE "WM_POOL_TO_TRIGGER" ( + "POOL_ID" bigint NOT NULL, + "TRIGGER_ID" bigint NOT NULL +); + +ALTER TABLE ONLY "WM_POOL_TO_TRIGGER" + ADD CONSTRAINT "WM_POOL_TO_TRIGGER_pkey" PRIMARY KEY ("POOL_ID", "TRIGGER_ID"); + +ALTER TABLE ONLY "WM_POOL_TO_TRIGGER" + ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_POOL_TO_TRIGGER" + ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "WM_TRIGGER" ("TRIGGER_ID") DEFERRABLE; + + +CREATE TABLE "WM_MAPPING" ( + "MAPPING_ID" bigint NOT NULL, + "RP_ID" bigint NOT NULL, + "ENTITY_TYPE" character varying(128) NOT NULL, + "ENTITY_NAME" character varying(128) NOT NULL, + "POOL_ID" bigint, + "ORDERING" integer +); + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "WM_MAPPING_pkey" PRIMARY KEY ("MAPPING_ID"); + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "UNIQUE_WM_MAPPING" UNIQUE ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME"); + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; + +-- 047-HIVE-14498 +CREATE TABLE "MV_CREATION_METADATA" ( + "MV_CREATION_METADATA_ID" bigint NOT NULL, + "DB_NAME" character varying(128) NOT NULL, + "TBL_NAME" character varying(256) NOT NULL, + "TXN_LIST" text +); + +CREATE TABLE "MV_TABLES_USED" ( + "MV_CREATION_METADATA_ID" bigint NOT NULL, + "TBL_ID" bigint NOT NULL +); + +ALTER TABLE ONLY "MV_CREATION_METADATA" + ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID"); + +CREATE INDEX "MV_UNIQUE_TABLE" + ON "MV_CREATION_METADATA" USING btree ("TBL_NAME", "DB_NAME"); + +ALTER TABLE ONLY "MV_TABLES_USED" + ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN KEY ("MV_CREATION_METADATA_ID") REFERENCES "MV_CREATION_METADATA" ("MV_CREATION_METADATA_ID") DEFERRABLE; + +ALTER TABLE ONLY "MV_TABLES_USED" + ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS" ("TBL_ID") DEFERRABLE; + +ALTER TABLE COMPLETED_TXN_COMPONENTS ADD COLUMN CTC_TIMESTAMP timestamp NULL; + +UPDATE COMPLETED_TXN_COMPONENTS SET CTC_TIMESTAMP = CURRENT_TIMESTAMP; + +ALTER TABLE COMPLETED_TXN_COMPONENTS ALTER COLUMN CTC_TIMESTAMP SET NOT NULL; + +ALTER TABLE COMPLETED_TXN_COMPONENTS ALTER COLUMN CTC_TIMESTAMP SET DEFAULT CURRENT_TIMESTAMP; + +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS USING btree (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +-- 048-HIVE-18489 +UPDATE "FUNC_RU" + SET "RESOURCE_URI" = 's3a' || SUBSTR("RESOURCE_URI", 4) + WHERE "RESOURCE_URI" LIKE 's3n://%' ; + +UPDATE "SKEWED_COL_VALUE_LOC_MAP" + SET "LOCATION" = 's3a' || SUBSTR("LOCATION", 4) + WHERE "LOCATION" LIKE 's3n://%' ; + +UPDATE "SDS" + SET "LOCATION" = 's3a' || SUBSTR("LOCATION", 4) + WHERE "LOCATION" LIKE 's3n://%' ; + +UPDATE "DBS" + SET "DB_LOCATION_URI" = 's3a' || SUBSTR("DB_LOCATION_URI", 4) + WHERE "DB_LOCATION_URI" LIKE 's3n://%' ; + +-- Record the new schema version; this should be the last step of the upgrade. +UPDATE "VERSION" SET "SCHEMA_VERSION"='3.0.0', "VERSION_COMMENT"='Hive release version 3.0.0' where "VER_ID"=1; +SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0'; diff --git standalone-metastore/src/main/sql/postgres/upgrade.order.postgres standalone-metastore/src/main/sql/postgres/upgrade.order.postgres new file mode 100644 index 0000000000..d7091b5228 --- /dev/null +++ standalone-metastore/src/main/sql/postgres/upgrade.order.postgres @@ -0,0 +1,16 @@ +0.5.0-to-0.6.0 +0.6.0-to-0.7.0 +0.7.0-to-0.8.0 +0.8.0-to-0.9.0 +0.9.0-to-0.10.0 +0.10.0-to-0.11.0 +0.11.0-to-0.12.0 +0.12.0-to-0.13.0 +0.13.0-to-0.14.0 +0.14.0-to-1.1.0 +1.1.0-to-1.2.0 +1.2.0-to-2.0.0 +2.0.0-to-2.1.0 +2.1.0-to-2.2.0 +2.2.0-to-2.3.0 +2.3.0-to-3.0.0 diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/DbInstallBase.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/DbInstallBase.java new file mode 100644 index 0000000000..4722a568e8 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/DbInstallBase.java @@ -0,0 +1,280 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.dbinstall; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.hive.metastore.HiveMetaException; +import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfoFactory; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.tools.MetastoreSchemaTool; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; + +public abstract class DbInstallBase { + + private static final Logger LOG = LoggerFactory.getLogger(DbInstallBase.class); + + private static final String HIVE_USER = "hiveuser"; + protected static final String HIVE_DB = "hivedb"; + private static final String FIRST_VERSION = "1.2.0"; + private static final int MAX_STARTUP_WAIT = 5 * 60 * 1000; + + private String metastoreHome; + + protected abstract String getDockerContainerName(); + protected abstract String getDockerImageName(); + protected abstract String[] getDockerAdditionalArgs(); + protected abstract String getDbType(); + protected abstract String getDbRootUser(); + protected abstract String getDbRootPassword(); + protected abstract String getJdbcDriver(); + protected abstract String getJdbcUrl(); + /** + * URL to use when connecting as root rather than Hive + * @return URL + */ + protected abstract String getInitialJdbcUrl(); + + /** + * Determine if the docker container is ready to use. 
+ * @param logOutput output of docker logs command + * @return true if ready, false otherwise + */ + protected abstract boolean isContainerReady(String logOutput); + protected abstract String getHivePassword(); + + @Before + public void runDockerContainer() throws IOException, InterruptedException { + if (runCmdAndPrintStreams(buildRunCmd(), 600) != 0) { + throw new RuntimeException("Unable to start docker container"); + } + long startTime = System.currentTimeMillis(); + ProcessResults pr; + do { + Thread.sleep(5000); + pr = runCmd(buildLogCmd(), 5); + if (pr.rc != 0) throw new RuntimeException("Failed to get docker logs"); + } while (startTime + MAX_STARTUP_WAIT >= System.currentTimeMillis() && !isContainerReady(pr.stdout)); + if (startTime + MAX_STARTUP_WAIT < System.currentTimeMillis()) { + throw new RuntimeException("Container failed to be ready in " + MAX_STARTUP_WAIT/1000 + + " seconds"); + } + MetastoreSchemaTool.homeDir = metastoreHome = System.getProperty("test.tmp.dir", "target/tmp"); + } + + @After + public void stopAndRmDockerContainer() throws IOException, InterruptedException { + if ("true".equalsIgnoreCase(System.getProperty("metastore.itest.no.stop.container"))) { + LOG.warn("Not stopping container " + getDockerContainerName() + " at user request, please " + + "be sure to shut it down before rerunning the test."); + return; + } + if (runCmdAndPrintStreams(buildStopCmd(), 60) != 0) { + throw new RuntimeException("Unable to stop docker container"); + } + if (runCmdAndPrintStreams(buildRmCmd(), 15) != 0) { + throw new RuntimeException("Unable to remove docker container"); + } + } + + private static class ProcessResults { + final String stdout; + final String stderr; + final int rc; + + public ProcessResults(String stdout, String stderr, int rc) { + this.stdout = stdout; + this.stderr = stderr; + this.rc = rc; + } + } + + private ProcessResults runCmd(String[] cmd, long secondsToWait) throws IOException, + InterruptedException { + LOG.info("Going to run: " + StringUtils.join(cmd, " ")); + Process proc = Runtime.getRuntime().exec(cmd); + if (!proc.waitFor(secondsToWait, TimeUnit.SECONDS)) { + throw new RuntimeException("Process " + cmd[0] + " failed to run in " + secondsToWait + + " seconds"); + } + BufferedReader reader = new BufferedReader(new InputStreamReader(proc.getInputStream())); + final StringBuilder lines = new StringBuilder(); + reader.lines() + .forEach(s -> lines.append(s).append('\n')); + + reader = new BufferedReader(new InputStreamReader(proc.getErrorStream())); + final StringBuilder errLines = new StringBuilder(); + reader.lines() + .forEach(s -> errLines.append(s).append('\n')); + return new ProcessResults(lines.toString(), errLines.toString(), proc.exitValue()); + } + + private int runCmdAndPrintStreams(String[] cmd, long secondsToWait) + throws InterruptedException, IOException { + ProcessResults results = runCmd(cmd, secondsToWait); + LOG.info("Stdout from proc: " + results.stdout); + LOG.info("Stderr from proc: " + results.stderr); + return results.rc; + } + + private int createUser() { + return MetastoreSchemaTool.run(buildArray( + "-createUser", + "-dbType", + getDbType(), + "-userName", + getDbRootUser(), + "-passWord", + getDbRootPassword(), + "-hiveUser", + HIVE_USER, + "-hivePassword", + getHivePassword(), + "-hiveDb", + HIVE_DB, + "-url", + getInitialJdbcUrl(), + "-driver", + getJdbcDriver() + )); + } + + private int installLatest() { + return MetastoreSchemaTool.run(buildArray( + "-initSchema", + "-dbType", + getDbType(), + "-userName", + HIVE_USER, 
+ "-passWord", + getHivePassword(), + "-url", + getJdbcUrl(), + "-driver", + getJdbcDriver() + )); + } + + private int installAVersion(String version) { + return MetastoreSchemaTool.run(buildArray( + "-initSchemaTo", + version, + "-dbType", + getDbType(), + "-userName", + HIVE_USER, + "-passWord", + getHivePassword(), + "-url", + getJdbcUrl(), + "-driver", + getJdbcDriver() + )); + } + + private int upgradeToLatest() { + return MetastoreSchemaTool.run(buildArray( + "-upgradeSchema", + "-dbType", + getDbType(), + "-userName", + HIVE_USER, + "-passWord", + getHivePassword(), + "-url", + getJdbcUrl(), + "-driver", + getJdbcDriver() + )); + } + + protected String[] buildArray(String... strs) { + return strs; + } + + private String getCurrentVersionMinusOne() throws HiveMetaException { + List<String> scripts = MetaStoreSchemaInfoFactory.get( + MetastoreConf.newMetastoreConf(), metastoreHome, getDbType() + ).getUpgradeScripts(FIRST_VERSION); + Assert.assertTrue(scripts.size() > 0); + String lastUpgradePath = scripts.get(scripts.size() - 1); + String version = lastUpgradePath.split("-")[1]; + LOG.info("Current version minus 1 is " + version); + return version; + } + + @Test + public void install() { + Assert.assertEquals(0, createUser()); + Assert.assertEquals(0, installLatest()); + } + + @Test + public void upgrade() throws HiveMetaException { + Assert.assertEquals(0, createUser()); + Assert.assertEquals(0, installAVersion(FIRST_VERSION)); + Assert.assertEquals(0, upgradeToLatest()); + } + + private String[] buildRunCmd() { + List<String> cmd = new ArrayList<>(4 + getDockerAdditionalArgs().length); + cmd.add("docker"); + cmd.add("run"); + cmd.add("--name"); + cmd.add(getDockerContainerName()); + cmd.addAll(Arrays.asList(getDockerAdditionalArgs())); + cmd.add(getDockerImageName()); + return cmd.toArray(new String[cmd.size()]); + } + + private String[] buildStopCmd() { + return buildArray( + "docker", + "stop", + getDockerContainerName() + ); + } + + private String[] buildRmCmd() { + return buildArray( + "docker", + "rm", + getDockerContainerName() + ); + } + + private String[] buildLogCmd() { + return buildArray( + "docker", + "logs", + getDockerContainerName() + ); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestMysql.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestMysql.java new file mode 100644 index 0000000000..9999d8d705 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestMysql.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.dbinstall; + +public class ITestMysql extends DbInstallBase { + + @Override + protected String getDockerImageName() { + return "mariadb:5.5"; + } + + @Override + protected String[] getDockerAdditionalArgs() { + return buildArray( + "-p", + "3306:3306", + "-e", + "MYSQL_ROOT_PASSWORD=" + getDbRootPassword(), + "-d" + ); + } + + @Override + protected String getDbType() { + return "mysql"; + } + + @Override + protected String getDbRootUser() { + return "root"; + } + + @Override + protected String getDbRootPassword() { + return "its-a-secret"; + } + + @Override + protected String getJdbcDriver() { + return org.mariadb.jdbc.Driver.class.getName(); + } + + @Override + protected String getJdbcUrl() { + return "jdbc:mysql://localhost:3306/" + HIVE_DB; + } + + @Override + protected String getInitialJdbcUrl() { + return "jdbc:mysql://localhost:3306/"; + } + + @Override + protected boolean isContainerReady(String logOutput) { + return logOutput.contains("MySQL init process done. Ready for start up."); + } + + @Override + protected String getDockerContainerName() { + return "metastore-test-mysql-install"; + } + + @Override + protected String getHivePassword() { + return "hivepassword"; + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestOracle.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestOracle.java new file mode 100644 index 0000000000..2cff1a5d9c --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestOracle.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.dbinstall; + +public class ITestOracle extends DbInstallBase { + @Override + protected String getDockerContainerName() { + return "metastore-test-oracle-install"; + } + + @Override + protected String getDockerImageName() { + return "alexeiled/docker-oracle-xe-11g"; + } + + @Override + protected String[] getDockerAdditionalArgs() { + return buildArray( + "-p", + "1521:1521", + "-e", + "DEFAULT_SYS_PASS=" + getDbRootPassword(), + "-e", + "ORACLE_ALLOW_REMOTE=true", + "-d" + ); + } + + @Override + protected String getDbType() { + return "oracle"; + } + + @Override + protected String getDbRootUser() { + return "SYS as SYSDBA"; + } + + @Override + protected String getDbRootPassword() { + return "oracle"; + } + + @Override + protected String getJdbcDriver() { + return "oracle.jdbc.OracleDriver"; + } + + @Override + protected String getJdbcUrl() { + return "jdbc:oracle:thin:@//localhost:1521/xe"; + } + + @Override + protected String getInitialJdbcUrl() { + return "jdbc:oracle:thin:@//localhost:1521/xe"; + } + + @Override + protected boolean isContainerReady(String logOutput) { + return logOutput.contains("Oracle started successfully!"); + } + + @Override + protected String getHivePassword() { + return "hivepassword"; + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestPostgres.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestPostgres.java new file mode 100644 index 0000000000..9151ac766e --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestPostgres.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.dbinstall; + +public class ITestPostgres extends DbInstallBase { + @Override + protected String getDockerContainerName() { + return "metastore-test-postgres-install"; + } + + @Override + protected String getDockerImageName() { + return "postgres:9.3"; + } + + @Override + protected String[] getDockerAdditionalArgs() { + return buildArray( + "-p", + "5432:5432", + "-e", + "POSTGRES_PASSWORD=" + getDbRootPassword(), + "-d" + + ); + } + + @Override + protected String getDbType() { + return "postgres"; + } + + @Override + protected String getDbRootUser() { + return "postgres"; + } + + @Override + protected String getDbRootPassword() { + return "its-a-secret"; + } + + @Override + protected String getJdbcDriver() { + return org.postgresql.Driver.class.getName(); + } + + @Override + protected String getJdbcUrl() { + return "jdbc:postgresql://localhost:5432/" + HIVE_DB; + } + + @Override + protected String getInitialJdbcUrl() { + return "jdbc:postgresql://localhost:5432/postgres"; + } + + @Override + protected boolean isContainerReady(String logOutput) { + return logOutput.contains("database system is ready to accept connections"); + } + + @Override + protected String getHivePassword() { + return "hivepassword"; + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestSqlServer.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestSqlServer.java new file mode 100644 index 0000000000..67b6eeeab2 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestSqlServer.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.dbinstall; + +public class ITestSqlServer extends DbInstallBase { + @Override + protected String getDockerContainerName() { + return "metastore-test-mssql-install"; + } + + @Override + protected String getDockerImageName() { + return "microsoft/mssql-server-linux:2017-GA"; + } + + @Override + protected String[] getDockerAdditionalArgs() { + return buildArray( + "-p", + "1433:1433", + "-e", + "ACCEPT_EULA=Y", + "-e", + "SA_PASSWORD=" + getDbRootPassword(), + "-d" + ); + } + + @Override + protected String getDbType() { + return "mssql"; + } + + @Override + protected String getDbRootUser() { + return "SA"; + } + + @Override + protected String getDbRootPassword() { + return "Its-a-s3cret"; + } + + @Override + protected String getJdbcDriver() { + return com.microsoft.sqlserver.jdbc.SQLServerDriver.class.getName(); + //return "com.microsoft.sqlserver.jdbc.SQLServerDriver"; + } + + @Override + protected String getJdbcUrl() { + return "jdbc:sqlserver://localhost:1433;DatabaseName=" + HIVE_DB + ";"; + } + + @Override + protected String getInitialJdbcUrl() { + return "jdbc:sqlserver://localhost:1433"; + } + + @Override + protected boolean isContainerReady(String logOutput) { + return logOutput.contains("Recovery is complete. This is an informational message only. No user action is required."); + } + + @Override + protected String getHivePassword() { + return "h1vePassword!"; + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestMetastoreSchemaTool.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestMetastoreSchemaTool.java new file mode 100644 index 0000000000..8b07e93a9e --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestMetastoreSchemaTool.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.metastore.tools; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class TestMetastoreSchemaTool { + + private String scriptFile = System.getProperty("java.io.tmpdir") + File.separator + "someScript.sql"; + private Configuration conf; + private MetastoreSchemaTool.CommandBuilder builder; + private String password = "reallySimplePassword"; + + @Before + public void setup() throws IOException { + conf = MetastoreConf.newMetastoreConf(); + File file = new File(scriptFile); + if (!file.exists()) { + file.createNewFile(); + } + builder = new MetastoreSchemaTool.CommandBuilder(conf, null, null, "testUser", password, scriptFile); + } + + @After + public void globalAssert() throws IOException { + new File(scriptFile).delete(); + } + + @Test + public void shouldReturnStrippedPassword() throws IOException { + assertFalse(builder.buildToLog().contains(password)); + } + + @Test + public void shouldReturnActualPassword() throws IOException { + String[] strings = builder.buildToRun(); + assertTrue(Arrays.asList(strings).contains(password)); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java new file mode 100644 index 0000000000..c52729aefa --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java @@ -0,0 +1,467 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java
new file mode 100644
index 0000000000..c52729aefa
--- /dev/null
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java
@@ -0,0 +1,469 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.tools;
+
+import java.io.BufferedWriter;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.net.URI;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.Random;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HiveMetaException;
+import org.apache.hadoop.hive.metastore.IMetaStoreSchemaInfo;
+import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfoFactory;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TestSchemaToolForMetastore {
+  private static final Logger LOG = LoggerFactory.getLogger(TestSchemaToolForMetastore.class);
+
+  private MetastoreSchemaTool schemaTool;
+  private Connection conn;
+  private Configuration conf;
+  private String testMetastoreDB;
+  private PrintStream errStream;
+  private PrintStream outStream;
+
+  @Before
+  public void setUp() throws HiveMetaException, IOException {
+    testMetastoreDB = System.getProperty("java.io.tmpdir") +
+        File.separator + "test_metastore-" + new Random().nextInt();
+    System.setProperty(ConfVars.CONNECTURLKEY.toString(),
+        "jdbc:derby:" + testMetastoreDB + ";create=true");
+    conf = MetastoreConf.newMetastoreConf();
+    schemaTool = new MetastoreSchemaTool(
+        System.getProperty("test.tmp.dir", "target/tmp"), conf, "derby");
+    schemaTool.setUserName(MetastoreConf.getVar(schemaTool.getConf(), ConfVars.CONNECTION_USER_NAME));
+    schemaTool.setPassWord(MetastoreConf.getPassword(schemaTool.getConf(), ConfVars.PWD));
+    System.setProperty("beeLine.system.exit", "true");
+    errStream = System.err;
+    outStream = System.out;
+    conn = schemaTool.getConnectionToMetastore(false);
+  }
+
+  @After
+  public void tearDown() throws IOException, SQLException {
+    File metaStoreDir = new File(testMetastoreDB);
+    if (metaStoreDir.exists()) {
+      FileUtils.forceDeleteOnExit(metaStoreDir);
+    }
+    System.setOut(outStream);
+    System.setErr(errStream);
+    if (conn != null) {
+      conn.close();
+    }
+  }
+
+  // Test the sequence validation functionality
+  @Test
+  public void testValidateSequences() throws Exception {
+    schemaTool.doInit();
+
+    // Test empty database
+    boolean isValid = schemaTool.validateSequences(conn);
+    Assert.assertTrue(isValid);
+
+    // Test valid case
+    String[] scripts = new String[] {
+        "insert into SEQUENCE_TABLE values('org.apache.hadoop.hive.metastore.model.MDatabase', 100);",
+        "insert into DBS values(99, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test');"
+    };
+    File scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateSequences(conn);
+    Assert.assertTrue(isValid);
+
+    // Test invalid case
+    scripts = new String[] {
+        "delete from SEQUENCE_TABLE;",
+        "delete from DBS;",
+        "insert into SEQUENCE_TABLE values('org.apache.hadoop.hive.metastore.model.MDatabase', 100);",
+        "insert into DBS values(102, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test');"
+    };
+    scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateSequences(conn);
+    Assert.assertFalse(isValid);
+  }
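The invalid case above works because a sequence row claiming a next value of 100 contradicts a `DBS` row that already uses ID 102. A hedged sketch of what such a check can look like over JDBC; the query and the `dbSequenceIsValid` helper are illustrative, though the `SEQUENCE_TABLE(SEQUENCE_NAME, NEXT_VAL)` and `DBS(DB_ID)` shapes match the inserts used by the test.

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class SequenceCheck {
      // Returns false if the stored next sequence value lags behind
      // the highest DB_ID already handed out in DBS.
      static boolean dbSequenceIsValid(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                 "select s.NEXT_VAL, m.MAX_ID from SEQUENCE_TABLE s, " +
                 "(select max(DB_ID) as MAX_ID from DBS) m " +
                 "where s.SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MDatabase'")) {
          while (rs.next()) {
            long nextVal = rs.getLong("NEXT_VAL");
            long maxId = rs.getLong("MAX_ID"); // NULL when DBS is empty
            if (!rs.wasNull() && nextVal <= maxId) {
              return false; // the sequence would re-issue an existing ID
            }
          }
          return true;
        }
      }
    }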
+
+  // Test to validate that all tables exist in the HMS metastore.
+  @Test
+  public void testValidateSchemaTables() throws Exception {
+    schemaTool.doInit("1.2.0");
+
+    boolean isValid = schemaTool.validateSchemaTables(conn);
+    Assert.assertTrue(isValid);
+
+    // Upgrade from the 1.2.0 schema and re-validate
+    schemaTool.doUpgrade("1.2.0");
+    isValid = schemaTool.validateSchemaTables(conn);
+    Assert.assertTrue(isValid);
+
+    // Simulate a missing table scenario by renaming a couple of tables
+    String[] scripts = new String[] {
+        "RENAME TABLE SEQUENCE_TABLE to SEQUENCE_TABLE_RENAMED;",
+        "RENAME TABLE NUCLEUS_TABLES to NUCLEUS_TABLES_RENAMED;"
+    };
+
+    File scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateSchemaTables(conn);
+    Assert.assertFalse(isValid);
+
+    // Restore the renamed tables
+    scripts = new String[] {
+        "RENAME TABLE SEQUENCE_TABLE_RENAMED to SEQUENCE_TABLE;",
+        "RENAME TABLE NUCLEUS_TABLES_RENAMED to NUCLEUS_TABLES;"
+    };
+
+    scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateSchemaTables(conn);
+    Assert.assertTrue(isValid);
+  }
+
+  // Test the validation of incorrect NULL values in the tables
+  @Test
+  public void testValidateNullValues() throws Exception {
+    schemaTool.doInit();
+
+    // Test empty database
+    boolean isValid = schemaTool.validateColumnNullValues(conn);
+    Assert.assertTrue(isValid);
+
+    // Test valid case
+    createTestHiveTableSchemas();
+    isValid = schemaTool.validateColumnNullValues(conn);
+    Assert.assertTrue(isValid);
+
+    // Test invalid case
+    String[] scripts = new String[] {
+        "update TBLS set SD_ID=null"
+    };
+    File scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateColumnNullValues(conn);
+    Assert.assertFalse(isValid);
+  }
+
+  // Test dry run of schema initialization
+  @Test
+  public void testSchemaInitDryRun() throws Exception {
+    schemaTool.setDryRun(true);
+    schemaTool.doInit("3.0.0");
+    schemaTool.setDryRun(false);
+    try {
+      schemaTool.verifySchemaVersion();
+    } catch (HiveMetaException e) {
+      // The version check should fail since the dry run created nothing
+      return;
+    }
+    Assert.fail("Dry run shouldn't create actual metastore");
+  }
+
+  // Test dry run of schema upgrade
+  @Test
+  public void testSchemaUpgradeDryRun() throws Exception {
+    schemaTool.doInit("1.2.0");
+
+    schemaTool.setDryRun(true);
+    schemaTool.doUpgrade("1.2.0");
+    schemaTool.setDryRun(false);
+    try {
+      schemaTool.verifySchemaVersion();
+    } catch (HiveMetaException e) {
+      // The version check should fail since the dry run upgraded nothing
+      return;
+    }
+    Assert.fail("Dry run shouldn't upgrade metastore schema");
+  }
+
+  /**
+   * Test schema initialization
+   */
+  @Test
+  public void testSchemaInit() throws Exception {
+    IMetaStoreSchemaInfo metastoreSchemaInfo = MetaStoreSchemaInfoFactory.get(conf,
+        System.getProperty("test.tmp.dir", "target/tmp"), "derby");
+    schemaTool.doInit(metastoreSchemaInfo.getHiveSchemaVersion());
+    schemaTool.verifySchemaVersion();
+  }
+
+  /**
+   * Test validation for schema versions
+   */
+  @Test
+  public void testValidateSchemaVersions() throws Exception {
+    schemaTool.doInit();
+    boolean isValid = schemaTool.validateSchemaVersions();
+    Assert.assertTrue(isValid);
+
+    // Test an invalid case with multiple versions
+    String[] scripts = new String[] {
+        "insert into VERSION values(100, '2.2.0', 'Hive release version 2.2.0')"
+    };
+    File scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateSchemaVersions();
+    Assert.assertFalse(isValid);
+
+    scripts = new String[] {
+        "delete from VERSION where VER_ID = 100"
+    };
+    scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateSchemaVersions();
+    Assert.assertTrue(isValid);
+
+    // Test an invalid case without version
+    scripts = new String[] {
+        "delete from VERSION"
+    };
+    scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateSchemaVersions();
+    Assert.assertFalse(isValid);
+  }
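The version checks above encode a simple invariant: `VERSION` must hold exactly one row, so both a duplicate row and an empty table fail validation. An illustrative standalone check under that assumption (not the tool's actual code):

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class VersionCheck {
      // Exactly one schema version row is allowed in a healthy metastore.
      static boolean versionRowIsUnique(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select count(*) from VERSION")) {
          rs.next();
          return rs.getInt(1) == 1;
        }
      }
    }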
+
+  /**
+   * Test schema upgrade
+   */
+  @Test
+  public void testSchemaUpgrade() throws Exception {
+    boolean foundException = false;
+    // Initialize 1.2.0 schema
+    schemaTool.doInit("1.2.0");
+    // Verify that the driver fails due to the older version schema
+    try {
+      schemaTool.verifySchemaVersion();
+    } catch (HiveMetaException e) {
+      // Expected to fail due to old schema
+      foundException = true;
+    }
+    if (!foundException) {
+      throw new Exception(
+          "Hive operations shouldn't pass with older version schema");
+    }
+
+    // Generate a dummy pre-upgrade script with errors
+    String invalidPreUpgradeScript = writeDummyPreUpgradeScript(
+        0, "upgrade-2.3.0-to-3.0.0.derby.sql", "foo bar;");
+    // Generate dummy pre-upgrade scripts with valid SQL
+    String validPreUpgradeScript0 = writeDummyPreUpgradeScript(
+        1, "upgrade-2.3.0-to-3.0.0.derby.sql",
+        "CREATE TABLE schema_test0 (id integer);");
+    String validPreUpgradeScript1 = writeDummyPreUpgradeScript(
+        2, "upgrade-2.3.0-to-3.0.0.derby.sql",
+        "CREATE TABLE schema_test1 (id integer);");
+
+    // Capture system out and err
+    schemaTool.setVerbose(true);
+    OutputStream stderr = new ByteArrayOutputStream();
+    PrintStream errPrintStream = new PrintStream(stderr);
+    System.setErr(errPrintStream);
+    OutputStream stdout = new ByteArrayOutputStream();
+    PrintStream outPrintStream = new PrintStream(stdout);
+    System.setOut(outPrintStream);
+
+    // Upgrade schema from 1.2.0 to latest
+    schemaTool.doUpgrade("1.2.0");
+
+    LOG.info("stdout is " + stdout.toString());
+    LOG.info("stderr is " + stderr.toString());
+
+    // Verify that the schemaTool ran pre-upgrade scripts and ignored errors
+    Assert.assertTrue(stderr.toString().contains(invalidPreUpgradeScript));
+    Assert.assertTrue(stderr.toString().contains("foo"));
+    Assert.assertFalse(stderr.toString().contains(validPreUpgradeScript0));
+    Assert.assertFalse(stderr.toString().contains(validPreUpgradeScript1));
+    Assert.assertTrue(stdout.toString().contains(validPreUpgradeScript0));
+    Assert.assertTrue(stdout.toString().contains(validPreUpgradeScript1));
+
+    // Verify that the driver works fine with the latest schema
+    schemaTool.verifySchemaVersion();
+  }
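This test leans on a convention used by `writeDummyPreUpgradeScript` further down: optional `pre-<n>-<upgrade script>` files sit next to each upgrade script, are probed in index order, run before the upgrade, and fail softly rather than fatally. A sketch of that probing loop under those assumptions; the `PreUpgradeRunner` class, the contiguous-numbering assumption, and the `runScript` callback are illustrative.

    import java.io.File;
    import java.util.function.Consumer;

    public class PreUpgradeRunner {
      // Look for pre-0-<script>, pre-1-<script>, ... and run each one that
      // exists; a failing pre-upgrade script is logged and skipped.
      static void runPreUpgradeScripts(File scriptDir, String upgradeScript,
                                       Consumer<File> runScript) {
        for (int i = 0;; i++) {
          File pre = new File(scriptDir, "pre-" + i + "-" + upgradeScript);
          if (!pre.exists()) {
            break; // assuming contiguous numbering, the first gap ends the scan
          }
          try {
            runScript.accept(pre);
          } catch (RuntimeException e) {
            System.err.println("Ignoring failure in " + pre.getName() + ": " + e.getMessage());
          }
        }
      }
    }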
+
+  /**
+   * Test validation of location URIs
+   */
+  @Test
+  public void testValidateLocations() throws Exception {
+    schemaTool.doInit();
+    URI defaultRoot = new URI("hdfs://myhost.com:8020");
+    URI defaultRoot2 = new URI("s3://myhost2.com:8888");
+    // Check the empty DB case
+    boolean isValid = schemaTool.validateLocations(conn, null);
+    Assert.assertTrue(isValid);
+    isValid = schemaTool.validateLocations(conn, new URI[] {defaultRoot,defaultRoot2});
+    Assert.assertTrue(isValid);
+
+    // Test valid case
+    String[] scripts = new String[] {
+        "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role');",
+        "insert into DBS values(7, 'db with bad port', 'hdfs://myhost.com:8020/', 'haDB', 'public', 'role');",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3,null,'org.apache.hadoop.mapred.TextInputFormat','N','N',null,-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (3 ,1435255431,2,0 ,'hive',0,3,'myView','VIRTUAL_VIEW','select a.col1,a.col2 from foo','select * from foo','n');",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (4012 ,1435255431,7,0 ,'hive',0,4000,'mytal4012','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(1, 1441402388,0, 'd1=1/d2=1',2,2);",
+        "insert into SKEWED_STRING_LIST values(1);",
+        "insert into SKEWED_STRING_LIST values(2);",
+        "insert into SKEWED_COL_VALUE_LOC_MAP values(1,1,'hdfs://myhost.com:8020/user/hive/warehouse/mytal/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/');",
+        "insert into SKEWED_COL_VALUE_LOC_MAP values(2,2,'s3://myhost.com:8020/user/hive/warehouse/mytal/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/');"
+    };
+    File scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateLocations(conn, null);
+    Assert.assertTrue(isValid);
+    isValid = schemaTool.validateLocations(conn, new URI[] {defaultRoot, defaultRoot2});
+    Assert.assertTrue(isValid);
+    scripts = new String[] {
+        "delete from SKEWED_COL_VALUE_LOC_MAP;",
+        "delete from SKEWED_STRING_LIST;",
+        "delete from PARTITIONS;",
+        "delete from TBLS;",
+        "delete from SDS;",
+        "delete from DBS;",
+        "insert into DBS values(2, 'my db', '/user/hive/warehouse/mydb', 'mydb', 'public', 'role');",
+        "insert into DBS values(4, 'my db2', 'hdfs://myhost.com:8020', '', 'public', 'role');",
+        "insert into DBS values(6, 'db with bad port', 'hdfs://myhost.com:8020:', 'zDB', 'public', 'role');",
+        "insert into DBS values(7, 'db with bad port', 'hdfs://mynameservice.com/', 'haDB', 'public', 'role');",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','file:///user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(1, 1441402388,0, 'd1=1/d2=1',2,2);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4001,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4003,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4004,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4002,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (5000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','file:///user/admin/2016_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (3000 ,1435255431,2,0 ,'hive',0,3000,'mytal3000','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (4011 ,1435255431,4,0 ,'hive',0,4001,'mytal4011','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (4012 ,1435255431,4,0 ,'hive',0,4002,'','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (4013 ,1435255431,4,0 ,'hive',0,4003,'mytal4013','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (4014 ,1435255431,2,0 ,'hive',0,4003,'','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(4001, 1441402388,0, 'd1=1/d2=4001',4001,4011);",
+        "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(4002, 1441402388,0, 'd1=1/d2=4002',4002,4012);",
+        "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(4003, 1441402388,0, 'd1=1/d2=4003',4003,4013);",
+        "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(4004, 1441402388,0, 'd1=1/d2=4004',4004,4014);",
+        "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(5000, 1441402388,0, 'd1=1/d2=5000',5000,2);",
+        "insert into SKEWED_STRING_LIST values(1);",
+        "insert into SKEWED_STRING_LIST values(2);",
+        "insert into SKEWED_COL_VALUE_LOC_MAP values(1,1,'hdfs://yourhost.com:8020/user/hive/warehouse/mytal/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/');",
+        "insert into SKEWED_COL_VALUE_LOC_MAP values(2,2,'file:///user/admin/warehouse/mytal/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/');"
+    };
+    scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateLocations(conn, null);
+    Assert.assertFalse(isValid);
+    isValid = schemaTool.validateLocations(conn, new URI[] {defaultRoot, defaultRoot2});
+    Assert.assertFalse(isValid);
+  }
+
+  @Test
+  public void testHiveMetastoreDbPropertiesTable() throws HiveMetaException, IOException {
+    schemaTool.doInit("3.0.0");
+    validateMetastoreDbPropertiesTable();
+  }
+
+  @Test
+  public void testMetastoreDbPropertiesAfterUpgrade() throws HiveMetaException, IOException {
+    schemaTool.doInit("1.2.0");
+    schemaTool.doUpgrade();
+    validateMetastoreDbPropertiesTable();
+  }
+
+  private File generateTestScript(String [] stmts) throws IOException {
+    File testScriptFile = File.createTempFile("schematest", ".sql");
+    testScriptFile.deleteOnExit();
+    FileWriter fstream = new FileWriter(testScriptFile.getPath());
+    BufferedWriter out = new BufferedWriter(fstream);
+    for (String line: stmts) {
+      out.write(line);
+      out.newLine();
+    }
+    out.close();
+    return testScriptFile;
+  }
+
+  private void validateMetastoreDbPropertiesTable() throws HiveMetaException, IOException {
+    boolean isValid = schemaTool.validateSchemaTables(conn);
+    Assert.assertTrue(isValid);
+    // Adding the same property key twice should violate the unique key constraint
+    String[] scripts = new String[] {
+        "insert into METASTORE_DB_PROPERTIES values ('guid', 'test-uuid-1', 'dummy uuid 1')",
+        "insert into METASTORE_DB_PROPERTIES values ('guid', 'test-uuid-2', 'dummy uuid 2')", };
+    File scriptFile = generateTestScript(scripts);
+    Exception ex = null;
+    try {
+      schemaTool.runSqlLine(scriptFile.getPath());
+    } catch (Exception iox) {
+      ex = iox;
+    }
+    Assert.assertTrue(ex instanceof IOException);
+  }
+
+  /**
+   * Write out a dummy pre-upgrade script with given SQL statement.
+   */
+  private String writeDummyPreUpgradeScript(int index, String upgradeScriptName,
+      String sql) throws Exception {
+    String preUpgradeScript = "pre-" + index + "-" + upgradeScriptName;
+    String dummyPreScriptPath = System.getProperty("test.tmp.dir", "target/tmp")
+        + File.separatorChar + "scripts" + File.separatorChar + "metastore"
+        + File.separatorChar + "upgrade" + File.separatorChar + "derby"
+        + File.separatorChar + preUpgradeScript;
+    FileWriter fstream = new FileWriter(dummyPreScriptPath);
+    BufferedWriter out = new BufferedWriter(fstream);
+    out.write(sql + System.getProperty("line.separator"));
+    out.close();
+    return preUpgradeScript;
+  }
+
+  // Insert records into the DB to simulate a Hive table
+  private void createTestHiveTableSchemas() throws IOException {
+    String[] scripts = new String[] {
+        "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role');",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (3 ,1435255431,2,0 ,'hive',0,2,'aTable','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(1, 1441402388,0, 'd1=1/d2=1',2,2);"
+    };
+    File scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+  }
+}
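Taken together, the fixture in setUp doubles as a recipe for driving the standalone schematool against an embedded Derby instance. A condensed sketch of that flow outside JUnit, using only calls that appear in the test itself; the class name, the temp path, and the same-package placement (which the tests rely on for visibility) are assumptions.

    package org.apache.hadoop.hive.metastore.tools;

    import java.io.File;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

    public class DerbySchemaInitExample {
      public static void main(String[] args) throws Exception {
        // Point the metastore at a throwaway embedded Derby database.
        String dbDir = System.getProperty("java.io.tmpdir") + File.separator + "demo_metastore";
        System.setProperty(ConfVars.CONNECTURLKEY.toString(),
            "jdbc:derby:" + dbDir + ";create=true");
        Configuration conf = MetastoreConf.newMetastoreConf();

        // Use the schema scripts shipped with the standalone metastore,
        // then initialize and verify the latest schema.
        MetastoreSchemaTool tool = new MetastoreSchemaTool("target/tmp", conf, "derby");
        tool.setUserName(MetastoreConf.getVar(tool.getConf(), ConfVars.CONNECTION_USER_NAME));
        tool.setPassWord(MetastoreConf.getPassword(tool.getConf(), ConfVars.PWD));
        tool.doInit();
        tool.verifySchemaVersion();
      }
    }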