diff --git itests/hive-blobstore/src/test/queries/clientpositive/insert_empty_into_blobstore.q itests/hive-blobstore/src/test/queries/clientpositive/insert_empty_into_blobstore.q index d4f0c7166f..e1c8636cde 100644 --- itests/hive-blobstore/src/test/queries/clientpositive/insert_empty_into_blobstore.q +++ itests/hive-blobstore/src/test/queries/clientpositive/insert_empty_into_blobstore.q @@ -36,7 +36,7 @@ PARTITIONED BY ( pt string, dt string, hr string) -SKEWED BY (id) ON ('1', '2', '3') STORED AS DIRECTORIES +SKEWED BY (id) ON ('1', '2', '3') LOCATION '${hiveconf:test.blobstore.path.unique}/insert_empty_into_blobstore/blobstore_list_bucketing'; INSERT INTO TABLE blobstore_list_bucketing PARTITION (pt='a', dt='a', hr='a') SELECT id, name, dept FROM empty; diff --git itests/hive-blobstore/src/test/results/clientpositive/insert_empty_into_blobstore.q.out itests/hive-blobstore/src/test/results/clientpositive/insert_empty_into_blobstore.q.out index ccd9ba5400..ce54a6d0d1 100644 --- itests/hive-blobstore/src/test/results/clientpositive/insert_empty_into_blobstore.q.out +++ itests/hive-blobstore/src/test/results/clientpositive/insert_empty_into_blobstore.q.out @@ -80,7 +80,7 @@ PARTITIONED BY ( pt string, dt string, hr string) -SKEWED BY (id) ON ('1', '2', '3') STORED AS DIRECTORIES +SKEWED BY (id) ON ('1', '2', '3') #### A masked pattern was here #### PREHOOK: type: CREATETABLE PREHOOK: Input: ### test.blobstore.path ###/insert_empty_into_blobstore/blobstore_list_bucketing @@ -94,7 +94,7 @@ PARTITIONED BY ( pt string, dt string, hr string) -SKEWED BY (id) ON ('1', '2', '3') STORED AS DIRECTORIES +SKEWED BY (id) ON ('1', '2', '3') #### A masked pattern was here #### POSTHOOK: type: CREATETABLE POSTHOOK: Input: ### test.blobstore.path ###/insert_empty_into_blobstore/blobstore_list_bucketing diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java index 9b50fd4f30..5c21d84365 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java @@ -107,7 +107,7 @@ protected void setUp() { db.createTable(src, cols, null, TextInputFormat.class, IgnoreKeyTextOutputFormat.class); db.loadTable(hadoopDataFile[i], src, - LoadFileType.KEEP_EXISTING, false, false, false, false, null, 0, false); + LoadFileType.KEEP_EXISTING, false, false, false, null, 0, false); i++; } diff --git itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java index 3b22f152e8..cc86eb3271 100644 --- itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java +++ itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java @@ -644,10 +644,10 @@ public void testValidateLocations() throws Exception { "insert into CTLGS values(3, 'test_cat_2', 'description', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb')", "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role', 'test_cat_2')", "insert into DBS values(7, 'db with bad port', 'hdfs://myhost.com:8020/', 'haDB', 'public', 'role', 'test_cat_2')", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values 
(1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3,null,'org.apache.hadoop.mapred.TextInputFormat','N','N',null,-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3,null,'org.apache.hadoop.mapred.TextInputFormat','N',null,-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4000,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n')", "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (3 ,1435255431,2,0 ,'hive',0,3,'myView','VIRTUAL_VIEW','select a.col1,a.col2 from foo','select * from foo','n')", "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (4012 ,1435255431,7,0 ,'hive',0,4000,'mytal4012','MANAGED_TABLE',NULL,NULL,'n')", @@ -674,17 +674,17 @@ public void testValidateLocations() throws Exception { "insert into DBS values(4, 'my db2', 'hdfs://myhost.com:8020', '', 'public', 'role', 'test_cat_2')", "insert into DBS values(6, 'db with bad port', 'hdfs://myhost.com:8020:', 'zDB', 'public', 'role', 'test_cat_2')", "insert into DBS values(7, 'db with bad port', 'hdfs://mynameservice.com/', 'haDB', 'public', 'role', 'test_cat_2')", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values 
(1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','file:///user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','file:///user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n')", "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(1, 1441402388,0, 'd1=1/d2=1',2,2)", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4001,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4003,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4004,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4002,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (5000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','file:///user/admin/2016_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", + "insert into 
SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3000,null,'org.apache.hadoop.mapred.TextInputFormat','N','yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4000,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4001,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4003,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4004,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4002,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (5000,null,'org.apache.hadoop.mapred.TextInputFormat','N','file:///user/admin/2016_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (3000 ,1435255431,2,0 ,'hive',0,3000,'mytal3000','MANAGED_TABLE',NULL,NULL,'n')", "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (4011 ,1435255431,4,0 ,'hive',0,4001,'mytal4011','MANAGED_TABLE',NULL,NULL,'n')", "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (4012 ,1435255431,4,0 ,'hive',0,4002,'','MANAGED_TABLE',NULL,NULL,'n')", @@ -773,8 +773,8 @@ private void createTestHiveTableSchemas() throws IOException { String[] scripts = new String[] { "insert into CTLGS values(2, 'my_catalog', 'description', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb')", "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8021/user/hive/warehouse/mydb', 'mydb', 'public', 'role', 'my_catalog')", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values 
(2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n')", "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (3 ,1435255431,2,0 ,'hive',0,2,'aTable','MANAGED_TABLE',NULL,NULL,'n')", "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(1, 1441402388,0, 'd1=1/d2=1',2,2)" diff --git metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql new file mode 100644 index 0000000000..d1be5edad4 --- /dev/null +++ metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql @@ -0,0 +1,1382 @@ +-- HIVE system db + +DROP DATABASE IF EXISTS SYS CASCADE; +CREATE DATABASE SYS; + +USE SYS; + +CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` ( + `SD_ID` bigint, + `BUCKET_COL_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_BUCKETING_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"BUCKET_COL_NAME\", + \"INTEGER_IDX\" +FROM + \"BUCKETING_COLS\"" +); + +CREATE TABLE IF NOT EXISTS `CDS` ( + `CD_ID` bigint, + CONSTRAINT `SYS_PK_CDS` PRIMARY KEY (`CD_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CD_ID\" +FROM + \"CDS\"" +); + +CREATE TABLE IF NOT EXISTS `COLUMNS_V2` ( + `CD_ID` bigint, + `COMMENT` string, + `COLUMN_NAME` string, + `TYPE_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_COLUMN_V2` PRIMARY KEY (`CD_ID`,`COLUMN_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CD_ID\", + \"COMMENT\", + \"COLUMN_NAME\", + \"TYPE_NAME\", + \"INTEGER_IDX\" +FROM + \"COLUMNS_V2\"" +); + +CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` ( + `DB_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_DATABASE_PARAMS` PRIMARY KEY (`DB_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"DATABASE_PARAMS\"" +); + +CREATE TABLE IF NOT EXISTS `DBS` ( + `DB_ID` bigint, + `DB_LOCATION_URI` string, + `NAME` string, + `OWNER_NAME` string, + `OWNER_TYPE` string, + CONSTRAINT `SYS_PK_DBS` PRIMARY KEY (`DB_ID`) 
DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_ID\", + \"DB_LOCATION_URI\", + \"NAME\", + \"OWNER_NAME\", + \"OWNER_TYPE\" +FROM + \"DBS\"" +); + +CREATE TABLE IF NOT EXISTS `DB_PRIVS` ( + `DB_GRANT_ID` bigint, + `CREATE_TIME` int, + `DB_ID` bigint, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `DB_PRIV` string, + CONSTRAINT `SYS_PK_DB_PRIVS` PRIMARY KEY (`DB_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_GRANT_ID\", + \"CREATE_TIME\", + \"DB_ID\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"DB_PRIV\" +FROM + \"DB_PRIVS\"" +); + +CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` ( + `USER_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` string, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `USER_PRIV` string, + CONSTRAINT `SYS_PK_GLOBAL_PRIVS` PRIMARY KEY (`USER_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"USER_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"USER_PRIV\" +FROM + \"GLOBAL_PRIVS\"" +); + +CREATE TABLE IF NOT EXISTS `PARTITIONS` ( + `PART_ID` bigint, + `CREATE_TIME` int, + `LAST_ACCESS_TIME` int, + `PART_NAME` string, + `SD_ID` bigint, + `TBL_ID` bigint, + CONSTRAINT `SYS_PK_PARTITIONS` PRIMARY KEY (`PART_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"CREATE_TIME\", + \"LAST_ACCESS_TIME\", + \"PART_NAME\", + \"SD_ID\", + \"TBL_ID\" +FROM + \"PARTITIONS\"" +); + +CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` ( + `TBL_ID` bigint, + `PKEY_COMMENT` string, + `PKEY_NAME` string, + `PKEY_TYPE` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_PARTITION_KEYS` PRIMARY KEY (`TBL_ID`,`PKEY_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"PKEY_COMMENT\", + \"PKEY_NAME\", + \"PKEY_TYPE\", + \"INTEGER_IDX\" +FROM + \"PARTITION_KEYS\"" +); + +CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` ( + `PART_ID` bigint, + `PART_KEY_VAL` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_PARTITION_KEY_VALS` PRIMARY KEY (`PART_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"PART_KEY_VAL\", + \"INTEGER_IDX\" +FROM + \"PARTITION_KEY_VALS\"" +); + +CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` ( + `PART_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_PARTITION_PARAMS` PRIMARY KEY (`PART_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"PARTITION_PARAMS\"" +); + +CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` ( + `PART_COLUMN_GRANT_ID` bigint, + 
`COLUMN_NAME` string, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PART_ID` bigint, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `PART_COL_PRIV` string, + CONSTRAINT `SYS_PK_PART_COL_PRIVS` PRIMARY KEY (`PART_COLUMN_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_COLUMN_GRANT_ID\", + \"COLUMN_NAME\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PART_ID\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"PART_COL_PRIV\" +FROM + \"PART_COL_PRIVS\"" +); + +CREATE TABLE IF NOT EXISTS `PART_PRIVS` ( + `PART_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PART_ID` bigint, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `PART_PRIV` string, + CONSTRAINT `SYS_PK_PART_PRIVS` PRIMARY KEY (`PART_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PART_ID\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"PART_PRIV\" +FROM + \"PART_PRIVS\"" +); + +CREATE TABLE IF NOT EXISTS `ROLES` ( + `ROLE_ID` bigint, + `CREATE_TIME` int, + `OWNER_NAME` string, + `ROLE_NAME` string, + CONSTRAINT `SYS_PK_ROLES` PRIMARY KEY (`ROLE_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"ROLE_ID\", + \"CREATE_TIME\", + \"OWNER_NAME\", + \"ROLE_NAME\" +FROM + \"ROLES\"" +); + +CREATE TABLE IF NOT EXISTS `ROLE_MAP` ( + `ROLE_GRANT_ID` bigint, + `ADD_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `ROLE_ID` bigint, + CONSTRAINT `SYS_PK_ROLE_MAP` PRIMARY KEY (`ROLE_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"ROLE_GRANT_ID\", + \"ADD_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"ROLE_ID\" +FROM + \"ROLE_MAP\"" +); + +CREATE TABLE IF NOT EXISTS `SDS` ( + `SD_ID` bigint, + `CD_ID` bigint, + `INPUT_FORMAT` string, + `IS_COMPRESSED` boolean, + `LOCATION` string, + `NUM_BUCKETS` int, + `OUTPUT_FORMAT` string, + `SERDE_ID` bigint, + CONSTRAINT `SYS_PK_SDS` PRIMARY KEY (`SD_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"CD_ID\", + \"INPUT_FORMAT\", + \"IS_COMPRESSED\", + \"LOCATION\", + \"NUM_BUCKETS\", + \"OUTPUT_FORMAT\", + \"SERDE_ID\" +FROM + \"SDS\"" +); + +CREATE TABLE IF NOT EXISTS `SD_PARAMS` ( + `SD_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_SD_PARAMS` PRIMARY KEY (`SD_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"SD_PARAMS\"" +); + +CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` ( + `SEQUENCE_NAME` string, + `NEXT_VAL` bigint, + CONSTRAINT `SYS_PK_SEQUENCE_TABLE` PRIMARY KEY (`SEQUENCE_NAME`) 
DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SEQUENCE_NAME\", + \"NEXT_VAL\" +FROM + \"SEQUENCE_TABLE\"" +); + +CREATE TABLE IF NOT EXISTS `SERDES` ( + `SERDE_ID` bigint, + `NAME` string, + `SLIB` string, + CONSTRAINT `SYS_PK_SERDES` PRIMARY KEY (`SERDE_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SERDE_ID\", + \"NAME\", + \"SLIB\" +FROM + \"SERDES\"" +); + +CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` ( + `SERDE_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_SERDE_PARAMS` PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SERDE_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"SERDE_PARAMS\"" +); + +CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` ( + `SD_ID` bigint, + `SKEWED_COL_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_COL_NAMES` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"SKEWED_COL_NAME\", + \"INTEGER_IDX\" +FROM + \"SKEWED_COL_NAMES\"" +); + +CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` ( + `SD_ID` bigint, + `STRING_LIST_ID_KID` bigint, + `LOCATION` string, + CONSTRAINT `SYS_PK_COL_VALUE_LOC_MAP` PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"STRING_LIST_ID_KID\", + \"LOCATION\" +FROM + \"SKEWED_COL_VALUE_LOC_MAP\"" +); + +CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` ( + `STRING_LIST_ID` bigint, + CONSTRAINT `SYS_PK_SKEWED_STRING_LIST` PRIMARY KEY (`STRING_LIST_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"STRING_LIST_ID\" +FROM + \"SKEWED_STRING_LIST\"" +); + +CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` ( + `STRING_LIST_ID` bigint, + `STRING_LIST_VALUE` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_STRING_LIST_VALUES` PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"STRING_LIST_ID\", + \"STRING_LIST_VALUE\", + \"INTEGER_IDX\" +FROM + \"SKEWED_STRING_LIST_VALUES\"" +); + +CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` ( + `SD_ID_OID` bigint, + `STRING_LIST_ID_EID` bigint, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_VALUES` PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID_OID\", + \"STRING_LIST_ID_EID\", + \"INTEGER_IDX\" +FROM + \"SKEWED_VALUES\"" +); + +CREATE TABLE IF NOT EXISTS `SORT_COLS` ( + `SD_ID` bigint, + `COLUMN_NAME` string, + `ORDER` int, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SORT_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = 
"METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"COLUMN_NAME\", + \"ORDER\", + \"INTEGER_IDX\" +FROM + \"SORT_COLS\"" +); + +CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` ( + `TBL_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_TABLE_PARAMS` PRIMARY KEY (`TBL_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"TABLE_PARAMS\"" +); + +CREATE TABLE IF NOT EXISTS `TBLS` ( + `TBL_ID` bigint, + `CREATE_TIME` int, + `DB_ID` bigint, + `LAST_ACCESS_TIME` int, + `OWNER` string, + `RETENTION` int, + `SD_ID` bigint, + `TBL_NAME` string, + `TBL_TYPE` string, + `VIEW_EXPANDED_TEXT` string, + `VIEW_ORIGINAL_TEXT` string, + `IS_REWRITE_ENABLED` boolean, + CONSTRAINT `SYS_PK_TBLS` PRIMARY KEY (`TBL_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"CREATE_TIME\", + \"DB_ID\", + \"LAST_ACCESS_TIME\", + \"OWNER\", + \"RETENTION\", + \"SD_ID\", + \"TBL_NAME\", + \"TBL_TYPE\", + \"VIEW_EXPANDED_TEXT\", + \"VIEW_ORIGINAL_TEXT\", + \"IS_REWRITE_ENABLED\" +FROM \"TBLS\"" +); + +CREATE TABLE IF NOT EXISTS `MV_CREATION_METADATA` ( + `MV_CREATION_METADATA_ID` bigint, + `DB_NAME` string, + `TBL_NAME` string, + `TXN_LIST` string, + CONSTRAINT `SYS_PK_MV_CREATION_METADATA` PRIMARY KEY (`MV_CREATION_METADATA_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"MV_CREATION_METADATA_ID\", + \"DB_NAME\", + \"TBL_NAME\", + \"TXN_LIST\" +FROM \"MV_CREATION_METADATA\"" +); + +CREATE TABLE IF NOT EXISTS `MV_TABLES_USED` ( + `MV_CREATION_METADATA_ID` bigint, + `TBL_ID` bigint +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"MV_CREATION_METADATA_ID\", + \"TBL_ID\" +FROM \"MV_TABLES_USED\"" +); + +CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` ( + `TBL_COLUMN_GRANT_ID` bigint, + `COLUMN_NAME` string, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `TBL_COL_PRIV` string, + `TBL_ID` bigint, + CONSTRAINT `SYS_PK_TBL_COL_PRIVS` PRIMARY KEY (`TBL_COLUMN_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_COLUMN_GRANT_ID\", + \"COLUMN_NAME\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"TBL_COL_PRIV\", + \"TBL_ID\" +FROM + \"TBL_COL_PRIVS\"" +); + +CREATE TABLE IF NOT EXISTS `TBL_PRIVS` ( + `TBL_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `TBL_PRIV` string, + `TBL_ID` bigint, + CONSTRAINT `SYS_PK_TBL_PRIVS` PRIMARY KEY (`TBL_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"TBL_PRIV\", + \"TBL_ID\" +FROM + 
\"TBL_PRIVS\"" +); + +CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` ( + `CS_ID` bigint, + `DB_NAME` string, + `TABLE_NAME` string, + `COLUMN_NAME` string, + `COLUMN_TYPE` string, + `TBL_ID` bigint, + `LONG_LOW_VALUE` bigint, + `LONG_HIGH_VALUE` bigint, + `DOUBLE_HIGH_VALUE` double, + `DOUBLE_LOW_VALUE` double, + `BIG_DECIMAL_LOW_VALUE` string, + `BIG_DECIMAL_HIGH_VALUE` string, + `NUM_NULLS` bigint, + `NUM_DISTINCTS` bigint, + `AVG_COL_LEN` double, + `MAX_COL_LEN` bigint, + `NUM_TRUES` bigint, + `NUM_FALSES` bigint, + `LAST_ANALYZED` bigint, + CONSTRAINT `SYS_PK_TAB_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CS_ID\", + \"DB_NAME\", + \"TABLE_NAME\", + \"COLUMN_NAME\", + \"COLUMN_TYPE\", + \"TBL_ID\", + \"LONG_LOW_VALUE\", + \"LONG_HIGH_VALUE\", + \"DOUBLE_HIGH_VALUE\", + \"DOUBLE_LOW_VALUE\", + \"BIG_DECIMAL_LOW_VALUE\", + \"BIG_DECIMAL_HIGH_VALUE\", + \"NUM_NULLS\", + \"NUM_DISTINCTS\", + \"AVG_COL_LEN\", + \"MAX_COL_LEN\", + \"NUM_TRUES\", + \"NUM_FALSES\", + \"LAST_ANALYZED\" +FROM + \"TAB_COL_STATS\"" +); + +CREATE TABLE IF NOT EXISTS `PART_COL_STATS` ( + `CS_ID` bigint, + `DB_NAME` string, + `TABLE_NAME` string, + `PARTITION_NAME` string, + `COLUMN_NAME` string, + `COLUMN_TYPE` string, + `PART_ID` bigint, + `LONG_LOW_VALUE` bigint, + `LONG_HIGH_VALUE` bigint, + `DOUBLE_HIGH_VALUE` double, + `DOUBLE_LOW_VALUE` double, + `BIG_DECIMAL_LOW_VALUE` string, + `BIG_DECIMAL_HIGH_VALUE` string, + `NUM_NULLS` bigint, + `NUM_DISTINCTS` bigint, + `AVG_COL_LEN` double, + `MAX_COL_LEN` bigint, + `NUM_TRUES` bigint, + `NUM_FALSES` bigint, + `LAST_ANALYZED` bigint, + CONSTRAINT `SYS_PK_PART_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CS_ID\", + \"DB_NAME\", + \"TABLE_NAME\", + \"PARTITION_NAME\", + \"COLUMN_NAME\", + \"COLUMN_TYPE\", + \"PART_ID\", + \"LONG_LOW_VALUE\", + \"LONG_HIGH_VALUE\", + \"DOUBLE_HIGH_VALUE\", + \"DOUBLE_LOW_VALUE\", + \"BIG_DECIMAL_LOW_VALUE\", + \"BIG_DECIMAL_HIGH_VALUE\", + \"NUM_NULLS\", + \"NUM_DISTINCTS\", + \"AVG_COL_LEN\", + \"MAX_COL_LEN\", + \"NUM_TRUES\", + \"NUM_FALSES\", + \"LAST_ANALYZED\" +FROM + \"PART_COL_STATS\"" +); + +CREATE TABLE IF NOT EXISTS `VERSION` ( + `VER_ID` BIGINT, + `SCHEMA_VERSION` string, + `VERSION_COMMENT` string, + CONSTRAINT `SYS_PK_VERSION` PRIMARY KEY (`VER_ID`) DISABLE +); + +INSERT INTO `VERSION` VALUES (1, '3.0.0', 'Hive release version 3.0.0'); + +CREATE TABLE IF NOT EXISTS `DB_VERSION` ( + `VER_ID` BIGINT, + `SCHEMA_VERSION` string, + `VERSION_COMMENT` string, + CONSTRAINT `SYS_PK_DB_VERSION` PRIMARY KEY (`VER_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"VER_ID\", + \"SCHEMA_VERSION\", + \"VERSION_COMMENT\" +FROM + \"VERSION\"" +); + +CREATE TABLE IF NOT EXISTS `FUNCS` ( + `FUNC_ID` bigint, + `CLASS_NAME` string, + `CREATE_TIME` int, + `DB_ID` bigint, + `FUNC_NAME` string, + `FUNC_TYPE` int, + `OWNER_NAME` string, + `OWNER_TYPE` string, + CONSTRAINT `SYS_PK_FUNCS` PRIMARY KEY (`FUNC_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"FUNC_ID\", + \"CLASS_NAME\", + \"CREATE_TIME\", + \"DB_ID\", + 
\"FUNC_NAME\", + \"FUNC_TYPE\", + \"OWNER_NAME\", + \"OWNER_TYPE\" +FROM + \"FUNCS\"" +); + +-- CREATE TABLE IF NOT EXISTS `FUNC_RU` ( +-- `FUNC_ID` bigint, +-- `RESOURCE_TYPE` int, +-- `RESOURCE_URI` string, +-- `INTEGER_IDX` int, +-- CONSTRAINT `SYS_PK_FUNCS_RU` PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`) DISABLE +-- ) +-- STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +-- TBLPROPERTIES ( +-- "hive.sql.database.type" = "METASTORE", +-- "hive.sql.query" = "SELECT * FROM FUNCS_RU" +-- ); + +CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS` +( + `CHILD_CD_ID` bigint, + `CHILD_INTEGER_IDX` int, + `CHILD_TBL_ID` bigint, + `PARENT_CD_ID` bigint, + `PARENT_INTEGER_IDX` int, + `PARENT_TBL_ID` bigint, + `POSITION` bigint, + `CONSTRAINT_NAME` string, + `CONSTRAINT_TYPE` string, + `UPDATE_RULE` string, + `DELETE_RULE` string, + `ENABLE_VALIDATE_RELY` int, + `DEFAULT_VALUE` string, + CONSTRAINT `SYS_PK_KEY_CONSTRAINTS` PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CHILD_CD_ID\", + \"CHILD_INTEGER_IDX\", + \"CHILD_TBL_ID\", + \"PARENT_CD_ID\", + \"PARENT_INTEGER_IDX\", + \"PARENT_TBL_ID\", + \"POSITION\", + \"CONSTRAINT_NAME\", + \"CONSTRAINT_TYPE\", + \"UPDATE_RULE\", + \"DELETE_RULE\", + \"ENABLE_VALIDATE_RELY\", + \"DEFAULT_VALUE\" +FROM + \"KEY_CONSTRAINTS\"" +); + +CREATE VIEW `TABLE_STATS_VIEW` AS +SELECT + `TBL_ID`, + max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS COLUMN_STATS_ACCURATE, + max(CASE `PARAM_KEY` WHEN 'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES, + max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS, + max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE, + max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) AS TOTAL_SIZE, + max(CASE `PARAM_KEY` WHEN 'transient_lastDdlTime' THEN `PARAM_VALUE` END) AS TRANSIENT_LAST_DDL_TIME +FROM `TABLE_PARAMS` GROUP BY `TBL_ID`; + +CREATE VIEW `PARTITION_STATS_VIEW` AS +SELECT + `PART_ID`, + max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS COLUMN_STATS_ACCURATE, + max(CASE `PARAM_KEY` WHEN 'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES, + max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS, + max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE, + max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) AS TOTAL_SIZE, + max(CASE `PARAM_KEY` WHEN 'transient_lastDdlTime' THEN `PARAM_VALUE` END) AS TRANSIENT_LAST_DDL_TIME +FROM `PARTITION_PARAMS` GROUP BY `PART_ID`; + +CREATE TABLE IF NOT EXISTS `WM_RESOURCEPLANS` ( + `NAME` string, + `STATUS` string, + `QUERY_PARALLELISM` int, + `DEFAULT_POOL_PATH` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"WM_RESOURCEPLAN\".\"NAME\", + \"STATUS\", + \"WM_RESOURCEPLAN\".\"QUERY_PARALLELISM\", + \"WM_POOL\".\"PATH\" +FROM + \"WM_RESOURCEPLAN\" LEFT OUTER JOIN \"WM_POOL\" ON \"WM_RESOURCEPLAN\".\"DEFAULT_POOL_ID\" = \"WM_POOL\".\"POOL_ID\"" +); + +CREATE TABLE IF NOT EXISTS `WM_TRIGGERS` ( + `RP_NAME` string, + `NAME` string, + `TRIGGER_EXPRESSION` string, + `ACTION_EXPRESSION` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + r.\"NAME\" AS RP_NAME, + t.\"NAME\" 
AS NAME, + \"TRIGGER_EXPRESSION\", + \"ACTION_EXPRESSION\" +FROM + \"WM_TRIGGER\" t +JOIN + \"WM_RESOURCEPLAN\" r +ON + t.\"RP_ID\" = r.\"RP_ID\"" +); + +CREATE TABLE IF NOT EXISTS `WM_POOLS` ( + `RP_NAME` string, + `PATH` string, + `ALLOC_FRACTION` double, + `QUERY_PARALLELISM` int, + `SCHEDULING_POLICY` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"WM_RESOURCEPLAN\".\"NAME\", + \"WM_POOL\".\"PATH\", + \"WM_POOL\".\"ALLOC_FRACTION\", + \"WM_POOL\".\"QUERY_PARALLELISM\", + \"WM_POOL\".\"SCHEDULING_POLICY\" +FROM + \"WM_POOL\" +JOIN + \"WM_RESOURCEPLAN\" +ON + \"WM_POOL\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\"" +); + +CREATE TABLE IF NOT EXISTS `WM_POOLS_TO_TRIGGERS` ( + `RP_NAME` string, + `POOL_PATH` string, + `TRIGGER_NAME` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME, + \"WM_POOL\".\"PATH\" AS POOL_PATH, + \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME +FROM \"WM_POOL_TO_TRIGGER\" + JOIN \"WM_POOL\" ON \"WM_POOL_TO_TRIGGER\".\"POOL_ID\" = \"WM_POOL\".\"POOL_ID\" + JOIN \"WM_TRIGGER\" ON \"WM_POOL_TO_TRIGGER\".\"TRIGGER_ID\" = \"WM_TRIGGER\".\"TRIGGER_ID\" + JOIN \"WM_RESOURCEPLAN\" ON \"WM_POOL\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\" +UNION +SELECT + \"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME, + '' AS POOL_PATH, + \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME +FROM \"WM_TRIGGER\" + JOIN \"WM_RESOURCEPLAN\" ON \"WM_TRIGGER\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\" +WHERE CAST(\"WM_TRIGGER\".\"IS_IN_UNMANAGED\" AS CHAR) IN ('1', 't') +" +); + +CREATE TABLE IF NOT EXISTS `WM_MAPPINGS` ( + `RP_NAME` string, + `ENTITY_TYPE` string, + `ENTITY_NAME` string, + `POOL_PATH` string, + `ORDERING` int +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"WM_RESOURCEPLAN\".\"NAME\", + \"ENTITY_TYPE\", + \"ENTITY_NAME\", + case when \"WM_POOL\".\"PATH\" is null then '' else \"WM_POOL\".\"PATH\" end, + \"ORDERING\" +FROM \"WM_MAPPING\" +JOIN \"WM_RESOURCEPLAN\" ON \"WM_MAPPING\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\" +LEFT OUTER JOIN \"WM_POOL\" ON \"WM_POOL\".\"POOL_ID\" = \"WM_MAPPING\".\"POOL_ID\" +" +); + +DROP DATABASE IF EXISTS INFORMATION_SCHEMA CASCADE; +CREATE DATABASE INFORMATION_SCHEMA; + +USE INFORMATION_SCHEMA; + +CREATE VIEW IF NOT EXISTS `SCHEMATA` +( + `CATALOG_NAME`, + `SCHEMA_NAME`, + `SCHEMA_OWNER`, + `DEFAULT_CHARACTER_SET_CATALOG`, + `DEFAULT_CHARACTER_SET_SCHEMA`, + `DEFAULT_CHARACTER_SET_NAME`, + `SQL_PATH` +) AS +SELECT DISTINCT + 'default', + D.`NAME`, + D.`OWNER_NAME`, + cast(null as string), + cast(null as string), + cast(null as string), + `DB_LOCATION_URI` +FROM + `sys`.`DBS` D, `sys`.`TBLS` T, `sys`.`TBL_PRIVS` P +WHERE + NOT restrict_information_schema() OR + D.`DB_ID` = T.`DB_ID` + AND T.`TBL_ID` = P.`TBL_ID` + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')); + +CREATE VIEW IF NOT EXISTS `TABLES` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `TABLE_TYPE`, + `SELF_REFERENCING_COLUMN_NAME`, + `REFERENCE_GENERATION`, + `USER_DEFINED_TYPE_CATALOG`, + `USER_DEFINED_TYPE_SCHEMA`, + `USER_DEFINED_TYPE_NAME`, + `IS_INSERTABLE_INTO`, + `IS_TYPED`, + 
`COMMIT_ACTION` +) AS +SELECT DISTINCT + 'default', + D.NAME, + T.TBL_NAME, + IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'VIEW', 'BASE_TABLE'), + cast(null as string), + cast(null as string), + cast(null as string), + cast(null as string), + cast(null as string), + IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'NO', 'YES'), + 'NO', + cast(null as string) +FROM + `sys`.`TBLS` T, `sys`.`DBS` D, `sys`.`TBL_PRIVS` P +WHERE + D.`DB_ID` = T.`DB_ID` + AND (NOT restrict_information_schema() OR T.`TBL_ID` = P.`TBL_ID` + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT'); + +CREATE VIEW IF NOT EXISTS `TABLE_PRIVILEGES` +( + `GRANTOR`, + `GRANTEE`, + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `PRIVILEGE_TYPE`, + `IS_GRANTABLE`, + `WITH_HIERARCHY` +) AS +SELECT DISTINCT + P.`GRANTOR`, + P.`PRINCIPAL_NAME`, + 'default', + D.`NAME`, + T.`TBL_NAME`, + P.`TBL_PRIV`, + IF (P.`GRANT_OPTION` == 0, 'NO', 'YES'), + 'NO' +FROM + `sys`.`TBL_PRIVS` P, + `sys`.`TBLS` T, + `sys`.`DBS` D, + `sys`.`TBL_PRIVS` P2 +WHERE + P.`TBL_ID` = T.`TBL_ID` + AND T.`DB_ID` = D.`DB_ID` + AND (NOT restrict_information_schema() OR + P.`TBL_ID` = P2.`TBL_ID` AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE` + AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 'public') AND P2.`PRINCIPAL_TYPE`='GROUP')) + AND P2.`TBL_PRIV`='SELECT'); + +CREATE VIEW IF NOT EXISTS `COLUMNS` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `COLUMN_NAME`, + `ORDINAL_POSITION`, + `COLUMN_DEFAULT`, + `IS_NULLABLE`, + `DATA_TYPE`, + `CHARACTER_MAXIMUM_LENGTH`, + `CHARACTER_OCTET_LENGTH`, + `NUMERIC_PRECISION`, + `NUMERIC_PRECISION_RADIX`, + `NUMERIC_SCALE`, + `DATETIME_PRECISION`, + `INTERVAL_TYPE`, + `INTERVAL_PRECISION`, + `CHARACTER_SET_CATALOG`, + `CHARACTER_SET_SCHEMA`, + `CHARACTER_SET_NAME`, + `COLLATION_CATALOG`, + `COLLATION_SCHEMA`, + `COLLATION_NAME`, + `UDT_CATALOG`, + `UDT_SCHEMA`, + `UDT_NAME`, + `SCOPE_CATALOG`, + `SCOPE_SCHEMA`, + `SCOPE_NAME`, + `MAXIMUM_CARDINALITY`, + `DTD_IDENTIFIER`, + `IS_SELF_REFERENCING`, + `IS_IDENTITY`, + `IDENTITY_GENERATION`, + `IDENTITY_START`, + `IDENTITY_INCREMENT`, + `IDENTITY_MAXIMUM`, + `IDENTITY_MINIMUM`, + `IDENTITY_CYCLE`, + `IS_GENERATED`, + `GENERATION_EXPRESSION`, + `IS_SYSTEM_TIME_PERIOD_START`, + `IS_SYSTEM_TIME_PERIOD_END`, + `SYSTEM_TIME_PERIOD_TIMESTAMP_GENERATION`, + `IS_UPDATABLE`, + `DECLARED_DATA_TYPE`, + `DECLARED_NUMERIC_PRECISION`, + `DECLARED_NUMERIC_SCALE` +) AS +SELECT DISTINCT + 'default', + D.NAME, + T.TBL_NAME, + C.COLUMN_NAME, + C.INTEGER_IDX, + cast (null as string), + 'YES', + C.TYPE_NAME as TYPE_NAME, + CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) 
= 'smallint' THEN 5 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 + WHEN lower(C.TYPE_NAME) = 'float' THEN 23 + WHEN lower(C.TYPE_NAME) = 'double' THEN 53 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'float' THEN 2 + WHEN lower(C.TYPE_NAME) = 'double' THEN 2 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+),(\\d+)',2) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+),(\\d+)',2) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'date' THEN 0 + WHEN lower(C.TYPE_NAME) = 'timestamp' THEN 9 + ELSE null END, + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + C.CD_ID, + 'NO', + 'NO', + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + 'NEVER', + cast (null as string), + 'NO', + 'NO', + cast (null as string), + 'YES', + C.TYPE_NAME as DECLARED_DATA_TYPE, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 + WHEN lower(C.TYPE_NAME) = 'float' THEN 23 + WHEN lower(C.TYPE_NAME) = 'double' THEN 53 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'float' THEN 2 + WHEN lower(C.TYPE_NAME) = 'double' THEN 2 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 + ELSE null END +FROM + `sys`.`COLUMNS_V2` C, + `sys`.`SDS` S, + `sys`.`TBLS` T, + `sys`.`DBS` D, + `sys`.`TBL_COL_PRIVS` P +WHERE + S.`SD_ID` = T.`SD_ID` + AND T.`DB_ID` = D.`DB_ID` + AND C.`CD_ID` = S.`CD_ID` + AND (NOT restrict_information_schema() OR + T.`TBL_ID` = P.`TBL_ID` + AND C.`COLUMN_NAME` = P.`COLUMN_NAME` + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_COL_PRIV`='SELECT'); + +CREATE VIEW IF NOT EXISTS `COLUMN_PRIVILEGES` +( + `GRANTOR`, + `GRANTEE`, + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `COLUMN_NAME`, + `PRIVILEGE_TYPE`, + `IS_GRANTABLE` +) AS +SELECT DISTINCT + P.`GRANTOR`, + P.`PRINCIPAL_NAME`, + 'default', + D.`NAME`, + T.`TBL_NAME`, + P.`COLUMN_NAME`, 
+ P.`TBL_COL_PRIV`, + IF (P.`GRANT_OPTION` == 0, 'NO', 'YES') +FROM + `sys`.`TBL_COL_PRIVS` P, + `sys`.`TBLS` T, + `sys`.`DBS` D, + `sys`.`SDS` S, + `sys`.`TBL_PRIVS` P2 +WHERE + S.`SD_ID` = T.`SD_ID` + AND T.`DB_ID` = D.`DB_ID` + AND P.`TBL_ID` = T.`TBL_ID` + AND (NOT restrict_information_schema() OR + P.`TBL_ID` = P2.`TBL_ID` AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE` + AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 'public') AND P2.`PRINCIPAL_TYPE`='GROUP')) + AND P2.`TBL_PRIV`='SELECT'); + +CREATE VIEW IF NOT EXISTS `VIEWS` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `VIEW_DEFINITION`, + `CHECK_OPTION`, + `IS_UPDATABLE`, + `IS_INSERTABLE_INTO`, + `IS_TRIGGER_UPDATABLE`, + `IS_TRIGGER_DELETABLE`, + `IS_TRIGGER_INSERTABLE_INTO` +) AS +SELECT DISTINCT + 'default', + D.NAME, + T.TBL_NAME, + T.VIEW_ORIGINAL_TEXT, + CAST(NULL as string), + false, + false, + false, + false, + false +FROM + `sys`.`DBS` D, + `sys`.`TBLS` T, + `sys`.`TBL_PRIVS` P +WHERE + D.`DB_ID` = T.`DB_ID` + AND length(T.VIEW_ORIGINAL_TEXT) > 0 + AND (NOT restrict_information_schema() OR + T.`TBL_ID` = P.`TBL_ID` + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT'); diff --git ql/pom.xml ql/pom.xml index d52c307cff..abbcb18e46 100644 --- ql/pom.xml +++ ql/pom.xml @@ -819,7 +819,7 @@ ${basedir}/src/java ${basedir}/src/test - + org.antlr antlr3-maven-plugin diff --git ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java index 8baf309e7f..4ea567f6b4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java +++ ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java @@ -246,8 +246,6 @@ INVALID_TABLE_IN_ON_CLAUSE_OF_MERGE(10149, "No columns from target table ''{0}'' found in ON " + "clause ''{1}'' of MERGE statement.", true), - LOAD_INTO_STORED_AS_DIR(10195, "A stored-as-directories table cannot be used as target for LOAD"), - ALTER_TBL_STOREDASDIR_NOT_SKEWED(10196, "This operation is only valid on skewed table."), ALTER_TBL_SKEWED_LOC_NO_LOC(10197, "Alter table skewed location doesn't have locations."), ALTER_TBL_SKEWED_LOC_NO_MAP(10198, "Alter table skewed location doesn't have location map."), SKEWED_TABLE_NO_COLUMN_NAME(10200, "No skewed column name."), diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 16d08543ba..bc5862f900 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -987,7 +987,6 @@ private DataOutputStream getOutputStream(Path outputFile) throws HiveException { private int mergeFiles(Hive db, AlterTablePartMergeFilesDesc mergeFilesDesc, DriverContext driverContext) throws HiveException { ListBucketingCtx lbCtx = mergeFilesDesc.getLbCtx(); - boolean lbatc = lbCtx == null ? false : lbCtx.isSkewedStoredAsDir(); int lbd = lbCtx == null ? 0 : lbCtx.calculateListBucketingLevel(); // merge work only needs input and output. 
@@ -1000,7 +999,6 @@ private int mergeFiles(Hive db, AlterTablePartMergeFilesDesc mergeFilesDesc, pathToAliases.put(mergeFilesDesc.getInputDir().get(0), inputDirstr); mergeWork.setPathToAliases(pathToAliases); mergeWork.setListBucketingCtx(mergeFilesDesc.getLbCtx()); - mergeWork.resolveConcatenateMerge(db.getConf()); mergeWork.setMapperCannotSpanPartns(true); mergeWork.setSourceTableInputFormat(mergeFilesDesc.getInputFormatClass().getName()); final FileMergeDesc fmd; @@ -1013,7 +1011,7 @@ private int mergeFiles(Hive db, AlterTablePartMergeFilesDesc mergeFilesDesc, fmd.setDpCtx(null); fmd.setHasDynamicPartitions(false); - fmd.setListBucketingAlterTableConcatenate(lbatc); + fmd.setListBucketingAlterTableConcatenate(false); fmd.setListBucketingDepth(lbd); fmd.setOutputPath(mergeFilesDesc.getOutputDir()); @@ -2673,9 +2671,6 @@ else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) { colValueList.add("('" + StringUtils.join(colValues, "','") + "')"); } tbl_skewedinfo.append(StringUtils.join(colValueList, ",") + ")"); - if (tbl.isStoredAsSubDirectories()) { - tbl_skewedinfo.append("\n STORED AS DIRECTORIES"); - } } // Row format (SerDe) @@ -4328,8 +4323,6 @@ private static StorageDescriptor retrieveStorageDescriptor(Table tbl, Partition tbl.setSkewedColNames(skewedColNames); tbl.setSkewedColValues(skewedValues); } - - tbl.setStoredAsSubDirectories(alterTbl.isStoredAsSubDirectories()); } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.OWNER) { if (alterTbl.getOwnerPrincipal() != null) { tbl.setOwner(alterTbl.getOwnerPrincipal().getName()); @@ -4405,24 +4398,6 @@ private static StorageDescriptor retrieveStorageDescriptor(Table tbl, Partition return result; } - private void checkMmLb(Table tbl) throws HiveException { - if (!tbl.isStoredAsSubDirectories()) { - return; - } - // TODO [MM gap?]: by design; no-one seems to use LB tables. They will work, but not convert. - // It's possible to work around this by re-creating and re-inserting the table. - throw new HiveException("Converting list bucketed tables stored as subdirectories " - + " to MM is not supported. Please re-create a table in the desired format."); - } - - private void checkMmLb(Partition part) throws HiveException { - if (!part.isStoredAsSubDirectories()) { - return; - } - throw new HiveException("Converting list bucketed tables stored as subdirectories " - + " to MM is not supported. Please re-create a table in the desired format."); - } - private List> generateAddMmTasks(Table tbl, Long writeId) throws HiveException { // We will move all the files in the table/partition directories into the first MM // directory, then commit the first write ID. 
@@ -4441,7 +4416,6 @@ private void checkMmLb(Partition part) throws HiveException { Iterator partIter = parts.iterator(); while (partIter.hasNext()) { Partition part = partIter.next(); - checkMmLb(part); Path src = part.getDataLocation(), tgt = new Path(src, mmDir); srcs.add(src); tgts.add(tgt); @@ -4450,7 +4424,6 @@ private void checkMmLb(Partition part) throws HiveException { } } } else { - checkMmLb(tbl); Path src = tbl.getDataLocation(), tgt = new Path(src, mmDir); srcs.add(src); tgts.add(tgt); @@ -4480,19 +4453,6 @@ private void checkMmLb(Partition part) throws HiveException { if (!isFromMmTable && isToMmTable) { if (!HiveConf.getBoolVar(conf, ConfVars.HIVE_MM_ALLOW_ORIGINALS)) { result = generateAddMmTasks(tbl, alterTbl.getWriteId()); - } else { - if (tbl.getPartitionKeys().size() > 0) { - Hive db = getHive(); - PartitionIterable parts = new PartitionIterable(db, tbl, null, - HiveConf.getIntVar(conf, ConfVars.METASTORE_BATCH_RETRIEVE_MAX)); - Iterator partIter = parts.iterator(); - while (partIter.hasNext()) { - Partition part0 = partIter.next(); - checkMmLb(part0); - } - } else { - checkMmLb(tbl); - } } } else if (isFromMmTable && !isToMmTable) { throw new HiveException("Cannot convert an ACID table to non-ACID"); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index 9c57eff2e8..deb5828a73 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.io.Serializable; -import java.io.StringWriter; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -31,8 +30,7 @@ import java.util.Map; import java.util.Properties; import java.util.Set; - -import com.google.common.collect.Lists; +import java.util.function.BiFunction; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -41,8 +39,8 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConfUtil; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.conf.HiveConfUtil; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -69,7 +67,10 @@ import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.ql.stats.StatsCollectionContext; import org.apache.hadoop.hive.ql.stats.StatsPublisher; -import org.apache.hadoop.hive.serde2.*; +import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.SerDeStats; +import org.apache.hadoop.hive.serde2.Serializer; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption; @@ -83,26 +84,12 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hive.common.util.HiveStringUtils; -import org.apache.hive.common.util.Murmur3; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.Serializable; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.function.BiFunction; - -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_TEMPORARY_TABLE_STORAGE; +import com.google.common.collect.Lists; /** * File Sink operator implementation. @@ -130,7 +117,6 @@ protected transient RecordWriter[] rowOutWriters; // row specific RecordWriters protected transient int maxPartitions; protected transient ListBucketingCtx lbCtx; - protected transient boolean isSkewedStoredAsSubDirectories; protected transient boolean[] statsFromRecordWriter; protected transient boolean isCollectRWStats; private transient FSPaths prevFsp; @@ -241,7 +227,7 @@ private void commit(FileSystem fs, List commitPaths) throws HiveException private void commitOneOutPath(int idx, FileSystem fs, List commitPaths) throws IOException, HiveException { - if ((bDynParts || isSkewedStoredAsSubDirectories) + if ((bDynParts) && !fs.exists(finalPaths[idx].getParent())) { if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { Utilities.FILE_OP_LOGGER.trace("commit making path for dyn/skew: " + finalPaths[idx].getParent()); @@ -292,13 +278,12 @@ public void abortWriters(FileSystem fs, boolean abort, boolean delete) throws Hi } } - public void initializeBucketPaths(int filesIdx, String taskId, boolean isNativeTable, - boolean isSkewedStoredAsSubDirectories) { + public void initializeBucketPaths(int filesIdx, String taskId, boolean isNativeTable) { if (isNativeTable) { String extension = Utilities.getFileExtension(jc, isCompressed, hiveOutputFormat); String taskWithExt = extension == null ? taskId : taskId + extension; if (!isMmTable) { - if (!bDynParts && !isSkewedStoredAsSubDirectories) { + if (!bDynParts) { finalPaths[filesIdx] = new Path(parent, taskWithExt); } else { finalPaths[filesIdx] = new Path(buildTmpPath(), taskWithExt); @@ -527,13 +512,9 @@ protected void initializeOp(Configuration hconf) throws HiveException { dpSetup(); } - if (lbCtx != null) { - lbSetup(); - } - if (!bDynParts) { fsp = new FSPaths(specPath, conf.isMmTable()); - fsp.subdirAfterTxn = combinePathFragments(generateListBucketingDirName(null), unionPath); + fsp.subdirAfterTxn = unionPath; if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { Utilities.FILE_OP_LOGGER.trace("creating new paths " + System.identityHashCode(fsp) + " from ctor; childSpec " + unionPath + ": tmpPath " + fsp.buildTmpPath() @@ -543,9 +524,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { // Create all the files - this is required because empty files need to be created for // empty buckets // createBucketFiles(fsp); - if (!this.isSkewedStoredAsSubDirectories) { - valToPaths.put("", fsp); // special entry for non-DP case - } + valToPaths.put("", fsp); } final StoragePolicyValue tmpStorage = StoragePolicyValue.lookup(HiveConf @@ -617,13 +596,6 @@ private void logOutputFormatError(Configuration hconf, HiveException ex) { LOG.error(errorWriter.toString(), ex); } - /** - * Initialize list bucketing information - */ - private void lbSetup() { - this.isSkewedStoredAsSubDirectories = ((lbCtx == null) ? false : lbCtx.isSkewedStoredAsDir()); - } - /** * Set up for dynamic partitioning including a new ObjectInspector for the output row. 
*/ @@ -715,7 +687,7 @@ protected void createBucketFiles(FSPaths fsp) throws HiveException { protected void createBucketForFileIdx(FSPaths fsp, int filesIdx) throws HiveException { try { - fsp.initializeBucketPaths(filesIdx, taskId, isNativeTable(), isSkewedStoredAsSubDirectories); + fsp.initializeBucketPaths(filesIdx, taskId, isNativeTable()); if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { Utilities.FILE_OP_LOGGER.trace("createBucketForFileIdx " + filesIdx + ": final path " + fsp.finalPaths[filesIdx] + "; out path " + fsp.outPaths[filesIdx] +" (spec path " + specPath + ", tmp path " @@ -851,18 +823,9 @@ protected boolean updateProgress() { @Override public void process(Object row, int tag) throws HiveException { runTimeNumRows++; - /* Create list bucketing sub-directory only if stored-as-directories is on. */ - String lbDirName = null; - lbDirName = (lbCtx == null) ? null : generateListBucketingDirName(row); if (!bDynParts && !filesCreated) { - if (lbDirName != null) { - if (valToPaths.get(lbDirName) == null) { - createNewPaths(null, lbDirName); - } - } else { - createBucketFiles(fsp); - } + createBucketFiles(fsp); } try { @@ -902,19 +865,13 @@ public void process(Object row, int tag) throws HiveException { dpCtx.getWhiteListPattern().toString() + "'. " + "(configure with " + HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN.varname + ")"); } - fpaths = getDynOutPaths(dpVals, lbDirName); + fpaths = getDynOutPaths(dpVals); // use SubStructObjectInspector to serialize the non-partitioning columns in the input row recordValue = serializer.serialize(row, subSetOI); } else { - if (lbDirName != null) { - fpaths = valToPaths.get(lbDirName); - if (fpaths == null) { - fpaths = createNewPaths(null, lbDirName); - } - } else { - fpaths = fsp; - } + fpaths = fsp; + recordValue = serializer.serialize(row, inputObjInspectors[0]); // if serializer is ThriftJDBCBinarySerDe, then recordValue is null if the buffer is not full (the size of buffer // is kept track of in the SerDe) @@ -1065,11 +1022,11 @@ assert getConf().getWriteType() != AcidUtils.Operation.DELETE && * @return * @throws HiveException */ - private FSPaths createNewPaths(String dpDir, String lbDir) throws HiveException { + private FSPaths createNewPaths(String dpDir) throws HiveException { FSPaths fsp2 = new FSPaths(specPath, conf.isMmTable()); - fsp2.subdirAfterTxn = combinePathFragments(lbDir, unionPath); + fsp2.subdirAfterTxn = unionPath; fsp2.subdirBeforeTxn = dpDir; - String pathKey = combinePathFragments(dpDir, lbDir); + String pathKey = dpDir; if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { Utilities.FILE_OP_LOGGER.trace("creating new paths {} for {}, childSpec {}: tmpPath {}," + " task path {}", System.identityHashCode(fsp2), pathKey, unionPath, @@ -1086,67 +1043,7 @@ private FSPaths createNewPaths(String dpDir, String lbDir) throws HiveException return fsp2; } - private static String combinePathFragments(String first, String second) { - return first == null ? second : (second == null ? first : first + Path.SEPARATOR + second); - } - - /** - * Generate list bucketing directory name from a row. - * @param row row to process. - * @return directory name. 
- */ - protected String generateListBucketingDirName(Object row) { - if (!this.isSkewedStoredAsSubDirectories) { - return null; - } - - String lbDirName = null; - List skewedCols = lbCtx.getSkewedColNames(); - List> allSkewedVals = lbCtx.getSkewedColValues(); - Map, String> locationMap = lbCtx.getLbLocationMap(); - - if (row != null) { - List standObjs = new ArrayList(); - List skewedValsCandidate = null; - /* Convert input row to standard objects. */ - ObjectInspectorUtils.copyToStandardObject(standObjs, row, - (StructObjectInspector) inputObjInspectors[0], ObjectInspectorCopyOption.WRITABLE); - - assert (standObjs.size() >= skewedCols.size()) : - "The row has less number of columns than no. of skewed column."; - - skewedValsCandidate = new ArrayList(skewedCols.size()); - for (SkewedColumnPositionPair posPair : lbCtx.getRowSkewedIndex()) { - skewedValsCandidate.add(posPair.getSkewColPosition(), - standObjs.get(posPair.getTblColPosition()).toString()); - } - /* The row matches skewed column names. */ - if (allSkewedVals.contains(skewedValsCandidate)) { - /* matches skewed values. */ - lbDirName = FileUtils.makeListBucketingDirName(skewedCols, skewedValsCandidate); - locationMap.put(skewedValsCandidate, lbDirName); - } else { - lbDirName = createDefaultLbDir(skewedCols, locationMap); - } - } else { - lbDirName = createDefaultLbDir(skewedCols, locationMap); - } - return lbDirName; - } - - private String createDefaultLbDir(List skewedCols, - Map, String> locationMap) { - String lbDirName; - lbDirName = FileUtils.makeDefaultListBucketingDirName(skewedCols, - lbCtx.getDefaultDirName()); - List defaultKey = Lists.newArrayList(lbCtx.getDefaultKey()); - if (!locationMap.containsKey(defaultKey)) { - locationMap.put(defaultKey, lbDirName); - } - return lbDirName; - } - - protected FSPaths getDynOutPaths(List row, String lbDir) throws HiveException { + protected FSPaths getDynOutPaths(List row) throws HiveException { FSPaths fp; @@ -1155,12 +1052,11 @@ protected FSPaths getDynOutPaths(List row, String lbDir) throws HiveExce String pathKey = null; if (dpDir != null) { - String dpAndLbDir = combinePathFragments(dpDir, lbDir); - pathKey = dpAndLbDir; + pathKey = dpDir; if (conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) { String buckNum = row.get(row.size() - 1); taskId = Utilities.replaceTaskIdFromFilename(taskId, buckNum); - pathKey = dpAndLbDir + Path.SEPARATOR + taskId; + pathKey = dpDir + Path.SEPARATOR + taskId; } FSPaths fsp2 = valToPaths.get(pathKey); @@ -1204,7 +1100,7 @@ protected FSPaths getDynOutPaths(List row, String lbDir) throws HiveExce prevFsp = null; } - fsp2 = createNewPaths(dpDir, lbDir); + fsp2 = createNewPaths(dpDir); if (prevFsp == null) { prevFsp = fsp2; } @@ -1515,50 +1411,10 @@ private void publishStats() throws HiveException { * This is server side code to create key in order to save statistics to stats database. * Client side will read it via StatsTask.java aggregateStats(). * Client side reads it via db query prefix which is based on partition spec. - * Since store-as-subdir information is not part of partition spec, we have to - * remove store-as-subdir information from variable "keyPrefix" calculation. - * But we have to keep store-as-subdir information in variable "key" calculation - * since each skewed value has a row in stats db and "key" is db key, - * otherwise later value overwrites previous value. - * Performance impact due to string handling is minimum since this method is - * only called once in FileSinkOperator closeOp(). 
- * For example, - * create table test skewed by (key, value) on (('484','val_484') stored as DIRECTORIES; - * skewedValueDirList contains 2 elements: - * 1. key=484/value=val_484 - * 2. HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME/HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME - * Case #1: Static partition with store-as-sub-dir - * spSpec has SP path - * fspKey has either - * key=484/value=val_484 or - * HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME/HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME - * After filter, fspKey is empty, storedAsDirPostFix has either - * key=484/value=val_484 or - * HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME/HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME - * so, at the end, "keyPrefix" doesnt have subdir information but "key" has - * Case #2: Dynamic partition with store-as-sub-dir. Assume dp part is hr - * spSpec has SP path - * fspKey has either - * hr=11/key=484/value=val_484 or - * hr=11/HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME/HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME - * After filter, fspKey is hr=11, storedAsDirPostFix has either - * key=484/value=val_484 or - * HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME/HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME - * so, at the end, "keyPrefix" doesn't have subdir information from skewed but "key" has * - * In a word, fspKey is consists of DP(dynamic partition spec) + LB(list bucketing spec) - * In stats publishing, full partition spec consists of prefix part of stat key - * but list bucketing spec is regarded as a postfix of stat key. So we split it here. + * In a word, fspKey consists of DP (dynamic partition spec). */ private String[] splitKey(String fspKey) { - if (!fspKey.isEmpty() && isSkewedStoredAsSubDirectories) { - for (String dir : lbCtx.getSkewedValuesDirNames()) { - int index = fspKey.indexOf(dir); - if (index >= 0) { - return new String[] {fspKey.substring(0, index), fspKey.substring(index + 1)}; - } - } - } return new String[] {fspKey, null}; } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index dbda5fdef4..207973c591 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -371,7 +371,7 @@ public int execute(DriverContext driverContext) { + " into " + tbd.getTable().getTableName()); } db.loadTable(tbd.getSourcePath(), tbd.getTable().getTableName(), tbd.getLoadFileType(), - work.isSrcLocal(), isSkewedStoredAsDirs(tbd), isFullAcidOp, hasFollowingStatsTask(), + work.isSrcLocal(), isFullAcidOp, hasFollowingStatsTask(), tbd.getWriteId(), tbd.getStmtId(), tbd.isInsertOverwrite()); if (work.getOutputs() != null) { DDLTask.addIfAbsentByName(new WriteEntity(table, @@ -470,11 +470,9 @@ private DataContainer handleStaticParts(Hive db, Table table, LoadTableDesc tbd, db.loadPartition(tbd.getSourcePath(), db.getTable(tbd.getTable().getTableName()), tbd.getPartitionSpec(), tbd.getLoadFileType(), tbd.getInheritTableSpecs(), - isSkewedStoredAsDirs(tbd), work.isSrcLocal(), - work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID && - !tbd.isMmTable(), - hasFollowingStatsTask(), - tbd.getWriteId(), tbd.getStmtId(), tbd.isInsertOverwrite(); + work.isSrcLocal(), + work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID && !tbd.isMmTable(), + hasFollowingStatsTask(), tbd.getWriteId(), tbd.getStmtId(), tbd.isInsertOverwrite()); Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false); // See the comment inside updatePartitionBucketSortColumns.
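With isSkewedStoredAsDirs(tbd) dropped from both call sites above, loadTable and loadPartition always write into the flat table or partition directory. A hedged HiveQL illustration of the load path (table and column names are hypothetical; src is the usual test dataset):

-- hedged sketch, assuming a hypothetical partitioned skewed table skew_t
CREATE TABLE skew_t (key STRING, value STRING)
PARTITIONED BY (ds STRING)
SKEWED BY (key) ON ('484');

-- rows with key='484' are no longer routed into a key=484/ sub-directory;
-- everything lands directly under the ds='1' partition location
INSERT OVERWRITE TABLE skew_t PARTITION (ds = '1')
SELECT key, value FROM src;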
@@ -705,11 +703,6 @@ private void checkFileFormats(Hive db, LoadTableDesc tbd, Table table) } } - private boolean isSkewedStoredAsDirs(LoadTableDesc tbd) { - return (tbd.getLbCtx() == null) ? false : tbd.getLbCtx() - .isSkewedStoredAsDir(); - } - /** * Alters the bucketing and/or sorting columns of the partition provided they meet some * validation criteria, e.g. the number of buckets match the number of files, and the diff --git ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java index 07abd378c5..0f44142848 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java @@ -20,10 +20,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat; import org.apache.hadoop.hive.ql.io.RCFileInputFormat; @@ -38,9 +35,7 @@ import org.apache.hadoop.hive.ql.plan.Explain.Level; import org.apache.hadoop.mapred.InputFormat; -import java.io.IOException; import java.util.ArrayList; -import java.util.LinkedHashMap; import java.util.List; @Explain(displayName = "Merge File Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -142,57 +137,6 @@ public void resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf, inputPaths.add(path); } - /** - * alter table ... concatenate - *
- * If it is skewed table, use subdirectories in inputpaths. - */ - public void resolveConcatenateMerge(HiveConf conf) { - isListBucketingAlterTableConcatenate = - ((listBucketingCtx == null) ? false : listBucketingCtx - .isSkewedStoredAsDir()); - LOG.info("isListBucketingAlterTableConcatenate : " + - isListBucketingAlterTableConcatenate); - if (isListBucketingAlterTableConcatenate) { - // use sub-dir as inputpath. - assert ((this.inputPaths != null) && (this.inputPaths.size() == 1)) : - "alter table ... concatenate should only have one" + - " directory inside inputpaths"; - Path dirPath = inputPaths.get(0); - try { - FileSystem inpFs = dirPath.getFileSystem(conf); - List status = HiveStatsUtils.getFileStatusRecurse( - dirPath, listBucketingCtx.getSkewedColNames().size(), inpFs); - List newInputPath = new ArrayList(); - boolean succeed = true; - for (FileStatus s : status) { - if (s.isDir()) { - // Add the lb path to the list of input paths - newInputPath.add(s.getPath()); - } else { - // find file instead of dir. dont change inputpath - succeed = false; - } - } - assert (succeed || ((!succeed) && newInputPath.isEmpty())) : - "This partition has " - + " inconsistent file structure: " - + - "it is stored-as-subdir and expected all files in the same depth" - + " of subdirectories."; - if (succeed) { - inputPaths.clear(); - inputPaths.addAll(newInputPath); - } - } catch (IOException e) { - String msg = - "Fail to get filesystem for directory name : " + dirPath.toUri(); - throw new RuntimeException(msg, e); - } - - } - } - /** * @return the listBucketingCtx */ diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index e8554f9ce5..141e871ba5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1645,9 +1645,9 @@ public Database getDatabaseCurrent() throws HiveException { * @return Partition object being loaded with data */ public Partition loadPartition(Path loadPath, Table tbl, Map partSpec, - LoadFileType loadFileType, boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, - boolean isSrcLocal, boolean isAcidIUDoperation, boolean hasFollowingStatsTask, Long writeId, - int stmtId, boolean isInsertOverwrite) throws HiveException { + LoadFileType loadFileType, boolean inheritTableSpecs, boolean isSrcLocal, + boolean isAcidIUDoperation, boolean hasFollowingStatsTask, Long writeId, int stmtId, + boolean isInsertOverwrite) throws HiveException { Path tblDataLocationPath = tbl.getDataLocation(); boolean isMmTableWrite = AcidUtils.isInsertOnlyTable(tbl.getParameters()); assert tbl.getPath() != null : "null==getPath() for " + tbl.getTableName(); @@ -1765,17 +1765,6 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par StatsSetupConst.clearColumnStatsState(newTPart.getParameters()); } - // recreate the partition if it existed before - if (isSkewedStoreAsSubdir) { - org.apache.hadoop.hive.metastore.api.Partition newCreatedTpart = newTPart.getTPartition(); - SkewedInfo skewedInfo = newCreatedTpart.getSd().getSkewedInfo(); - /* Construct list bucketing location mappings from sub-directory name. */ - Map, String> skewedColValueLocationMaps = constructListBucketingLocationMap( - newPartPath, skewedInfo); - /* Add list bucketing location mappings. 
*/ - skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps); - newCreatedTpart.getSd().setSkewedInfo(skewedInfo); - } if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { StatsSetupConst.setBasicStatsState(newTPart.getParameters(), StatsSetupConst.FALSE); } @@ -2168,7 +2157,7 @@ public Void call() throws Exception { // load the partition Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, loadFileType, - true, numLB > 0, false, isAcid, hasFollowingStatsTask, writeId, stmtId, + true, false, isAcid, hasFollowingStatsTask, writeId, stmtId, isInsertOverwrite); partitionsMap.put(fullPartSpec, newPartition); @@ -2253,17 +2242,15 @@ public Void call() throws Exception { * otherwise add files to table (KEEP_EXISTING, OVERWRITE_EXISTING) * @param isSrcLocal * If the source directory is LOCAL - * @param isSkewedStoreAsSubdir - * if list bucketing enabled * @param hasFollowingStatsTask * if there is any following stats task * @param isAcidIUDoperation true if this is an ACID based Insert [overwrite]/update/delete * @param writeId write ID allocated for the current load operation * @param stmtId statement ID of the current load statement */ - public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType, boolean isSrcLocal, - boolean isSkewedStoreAsSubdir, boolean isAcidIUDoperation, boolean hasFollowingStatsTask, - Long writeId, int stmtId, boolean isInsertOverwrite) throws HiveException { + public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType, + boolean isSrcLocal, boolean isAcidIUDoperation, boolean hasFollowingStatsTask, Long writeId, + int stmtId, boolean isInsertOverwrite) throws HiveException { List newFiles = null; Table tbl = getTable(tableName); assert tbl.getPath() != null : "null==getPath() for " + tbl.getTableName(); @@ -2326,20 +2313,6 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType StatsSetupConst.clearColumnStatsState(tbl.getParameters()); } - try { - if (isSkewedStoreAsSubdir) { - SkewedInfo skewedInfo = tbl.getSkewedInfo(); - // Construct list bucketing location mappings from sub-directory name. - Map, String> skewedColValueLocationMaps = constructListBucketingLocationMap( - tbl.getPath(), skewedInfo); - // Add list bucketing location mappings. 
- skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps); - } - } catch (IOException e) { - LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); - } - EnvironmentContext environmentContext = null; if (hasFollowingStatsTask) { environmentContext = new EnvironmentContext(); diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java index 9dbd869d57..f4f76649a2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java @@ -552,10 +552,6 @@ public void setLastAccessTime(int lastAccessTime) { tPartition.setLastAccessTime(lastAccessTime); } - public boolean isStoredAsSubDirectories() { - return tPartition.getSd().isStoredAsSubDirectories(); - } - public List> getSkewedColValues(){ return tPartition.getSd().getSkewedInfo().getSkewedColValues(); } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index ba16f842d2..1e4ebd1227 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -625,14 +625,6 @@ public void setSkewedInfo(SkewedInfo skewedInfo) throws HiveException { tTable.getSd().setSkewedInfo(skewedInfo); } - public boolean isStoredAsSubDirectories() { - return tTable.getSd().isStoredAsSubDirectories(); - } - - public void setStoredAsSubDirectories(boolean storedAsSubDirectories) throws HiveException { - tTable.getSd().setStoredAsSubDirectories(storedAsSubDirectories); - } - private boolean isField(String col) { for (FieldSchema field : getCols()) { if (field.getName().equals(col)) { diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java index 44687ef471..8baac7c8e9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java @@ -382,9 +382,6 @@ private static void getStorageDescriptorInfo(StringBuilder tableInfo, formatOutput("Num Buckets:", String.valueOf(storageDesc.getNumBuckets()), tableInfo); formatOutput("Bucket Columns:", storageDesc.getBucketCols().toString(), tableInfo); formatOutput("Sort Columns:", storageDesc.getSortCols().toString(), tableInfo); - if (storageDesc.isStoredAsSubDirectories()) {// optional parameter - formatOutput("Stored As SubDirectories:", "Yes", tableInfo); - } if (null != storageDesc.getSkewedInfo()) { List skewedColNames = sortedList(storageDesc.getSkewedInfo().getSkewedColNames()); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 605bb09cab..ea6b0b47bd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -1621,8 +1621,7 @@ public static MapWork createMergeTask(FileSinkDesc fsInputDesc, Path finalName, ArrayList inputDirstr = new ArrayList(1); // this will be populated by MergeFileWork.resolveDynamicPartitionStoredAsSubDirsMerge // in case of dynamic partitioning and list bucketing - if (!hasDynamicPartitions && - !GenMapRedUtils.isSkewedStoredAsDirs(fsInputDesc)) { + if (!hasDynamicPartitions) { inputDirs.add(inputDir); } inputDirstr.add(inputDir.toString()); @@ -1851,17 +1850,6 
@@ private static ConditionalTask createCondTask(HiveConf conf, return cndTsk; } - /** - * check if it is skewed table and stored as dirs. - * - * @param fsInputDesc - * @return - */ - public static boolean isSkewedStoredAsDirs(FileSinkDesc fsInputDesc) { - return (fsInputDesc.getLbCtx() == null) ? false : fsInputDesc.getLbCtx() - .isSkewedStoredAsDir(); - } - public static Task findMoveTaskForFsopOutput( List> mvTasks, Path fsopFinalDir, boolean isMmFsop) { // find the move task diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 596eddedf5..b1ff80abf9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -1839,17 +1839,15 @@ public boolean hasTransactionalInQuery() { * @param skewedColNames * @param skewedValues * @param skewedColValueLocationMaps - * @param isStoredAsSubDirectories * @return */ protected ListBucketingCtx constructListBucketingCtx(List skewedColNames, List> skewedValues, Map, String> skewedColValueLocationMaps, - boolean isStoredAsSubDirectories, HiveConf conf) { + HiveConf conf) { ListBucketingCtx lbCtx = new ListBucketingCtx(); lbCtx.setSkewedColNames(skewedColNames); lbCtx.setSkewedColValues(skewedValues); lbCtx.setLbLocationMap(skewedColValueLocationMaps); - lbCtx.setStoredAsSubDirectories(isStoredAsSubDirectories); lbCtx.setDefaultKey(ListBucketingPrunerUtils.HIVE_LIST_BUCKETING_DEFAULT_KEY); lbCtx.setDefaultDirName(ListBucketingPrunerUtils.HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME); return lbCtx; @@ -1962,22 +1960,6 @@ protected void analyzeDDLSkewedValues(List> skewedValues, ASTNode c } } - /** - * process stored as directories - * - * @param child - * @return - */ - protected boolean analyzeStoredAdDirs(ASTNode child) { - boolean storedAsDirs = false; - if ((child.getChildCount() == 3) - && (((ASTNode) child.getChild(2)).getToken().getType() - == HiveParser.TOK_STOREDASDIRS)) { - storedAsDirs = true; - } - return storedAsDirs; - } - private static boolean getPartExprNodeDesc(ASTNode astNode, HiveConf conf, Map astExprNodeMap) throws SemanticException { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 37e98456f1..5ef24def24 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -1503,8 +1503,6 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { Path oldTblPartLoc = null; List cols = null; ListBucketingCtx lbCtx = null; - boolean isListBucketed = false; - List listBucketColNames = null; if (table.isPartitioned()) { Partition part = db.getPartition(table, partSpec, false); @@ -1524,9 +1522,7 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { inputFormatClass = part.getInputFormatClass(); isArchived = ArchiveUtils.isArchived(part); lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(), - part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf); - isListBucketed = part.isStoredAsSubDirectories(); - listBucketColNames = part.getSkewedColNames(); + part.getSkewedColValueLocationMaps(), conf); } else { // input and output are the same oldTblPartLoc = table.getPath(); @@ -1535,9 +1531,7 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { 
bucketCols = table.getBucketCols(); inputFormatClass = table.getInputFormatClass(); lbCtx = constructListBucketingCtx(table.getSkewedColNames(), table.getSkewedColValues(), - table.getSkewedColValueLocationMaps(), table.isStoredAsSubDirectories(), conf); - isListBucketed = table.isStoredAsSubDirectories(); - listBucketColNames = table.getSkewedColNames(); + table.getSkewedColValueLocationMaps(), conf); } // throw a HiveException for non-rcfile. @@ -1570,14 +1564,6 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { throw new SemanticException(ErrorMsg.TRUNCATE_BUCKETED_COLUMN.getMsg(columnName)); } } - if (isListBucketed) { - for (String listBucketCol : listBucketColNames) { - if (listBucketCol.equalsIgnoreCase(columnName)) { - throw new SemanticException( - ErrorMsg.TRUNCATE_LIST_BUCKETED_COLUMN.getMsg(columnName)); - } - } - } } truncateTblDesc.setColumnIndexes(new ArrayList(columnIndexes)); @@ -2025,7 +2011,7 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast, oldTblPartLoc = partPath; lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(), - part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf); + part.getSkewedColValueLocationMaps(), conf); } } else { inputFormatClass = tblObj.getInputFormatClass(); @@ -2036,7 +2022,7 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast, newTblPartLoc = tblObj.getPath(); lbCtx = constructListBucketingCtx(tblObj.getSkewedColNames(), tblObj.getSkewedColValues(), - tblObj.getSkewedColValueLocationMaps(), tblObj.isStoredAsSubDirectories(), conf); + tblObj.getSkewedColValueLocationMaps(), conf); } // throw a HiveException for other than rcfile and orcfile. @@ -4074,7 +4060,6 @@ private void analyzeAltertableSkewedby(String[] qualified, ASTNode ast) throws S /* Convert a skewed table to non-skewed table. */ AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, true, new ArrayList(), new ArrayList>()); - alterTblDesc.setStoredAsSubDirectories(false); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); } else { @@ -4082,9 +4067,6 @@ private void analyzeAltertableSkewedby(String[] qualified, ASTNode ast) throws S case HiveParser.TOK_TABLESKEWED: handleAlterTableSkewedBy(ast, tableName, tab); break; - case HiveParser.TOK_STOREDASDIRS: - handleAlterTableDisableStoredAsDirs(tableName, tab); - break; default: assert false; } @@ -4092,29 +4074,7 @@ private void analyzeAltertableSkewedby(String[] qualified, ASTNode ast) throws S } /** - * Handle alter table not stored as directories - * - * @param tableName - * @param tab - * @throws SemanticException - */ - private void handleAlterTableDisableStoredAsDirs(String tableName, Table tab) - throws SemanticException { - List skewedColNames = tab.getSkewedColNames(); - List> skewedColValues = tab.getSkewedColValues(); - if ((skewedColNames == null) || (skewedColNames.size() == 0) || (skewedColValues == null) - || (skewedColValues.size() == 0)) { - throw new SemanticException(ErrorMsg.ALTER_TBL_STOREDASDIR_NOT_SKEWED.getMsg(tableName)); - } - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, false, - skewedColNames, skewedColValues); - alterTblDesc.setStoredAsSubDirectories(false); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc))); - } - - /** - * Process "alter table skewed by .. on .. stored as directories + * Process "alter table skewed by .. on .. 
* @param ast * @param tableName * @param tab @@ -4129,13 +4089,9 @@ private void handleAlterTableSkewedBy(ASTNode ast, String tableName, Table tab) skewedColNames = analyzeSkewedTablDDLColNames(skewedColNames, skewedNode); /* skewed value. */ analyzeDDLSkewedValues(skewedValues, skewedNode); - // stored as directories - boolean storedAsDirs = analyzeStoredAdDirs(skewedNode); - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, false, skewedColNames, skewedValues); - alterTblDesc.setStoredAsSubDirectories(storedAsDirs); /** * Validate information about skewed table */ @@ -4145,51 +4101,6 @@ private void handleAlterTableSkewedBy(ASTNode ast, String tableName, Table tab) alterTblDesc))); } - /** - * Analyze skewed column names - * - * @param skewedColNames - * @param child - * @return - * @throws SemanticException - */ - private List analyzeAlterTableSkewedColNames(List skewedColNames, - ASTNode child) throws SemanticException { - Tree nNode = child.getChild(0); - if (nNode == null) { - throw new SemanticException(ErrorMsg.SKEWED_TABLE_NO_COLUMN_NAME.getMsg()); - } else { - ASTNode nAstNode = (ASTNode) nNode; - if (nAstNode.getToken().getType() != HiveParser.TOK_TABCOLNAME) { - throw new SemanticException(ErrorMsg.SKEWED_TABLE_NO_COLUMN_NAME.getMsg()); - } else { - skewedColNames = getColumnNames(nAstNode); - } - } - return skewedColNames; - } - - /** - * Given a ASTNode, return list of values. - * - * use case: - * create table xyz list bucketed (col1) with skew (1,2,5) - * AST Node is for (1,2,5) - * - * @param ast - * @return - */ - private List getColumnValues(ASTNode ast) { - List colList = new ArrayList(); - int numCh = ast.getChildCount(); - for (int i = 0; i < numCh; i++) { - ASTNode child = (ASTNode) ast.getChild(i); - colList.add(stripQuotes(child.getText()).toLowerCase()); - } - return colList; - } - - /** * Analyze alter table's skewed location * diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 8726974186..30c65d604b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -339,7 +339,6 @@ TOK_TABCOLVALUES; TOK_SKEWED_LOCATIONS; TOK_SKEWED_LOCATION_LIST; TOK_SKEWED_LOCATION_MAP; -TOK_STOREDASDIRS; TOK_PARTITIONINGSPEC; TOK_PTBLFUNCTION; TOK_WINDOWDEF; @@ -995,13 +994,6 @@ rewriteDisabled -> ^(TOK_REWRITE_DISABLED) ; -storedAsDirs -@init { pushMsg("stored as directories", state); } -@after { popMsg(state); } - : KW_STORED KW_AS KW_DIRECTORIES - -> ^(TOK_STOREDASDIRS) - ; - orReplace @init { pushMsg("or replace clause", state); } @after { popMsg(state); } @@ -1431,9 +1423,6 @@ alterStatementSuffixSkewedby | KW_NOT KW_SKEWED ->^(TOK_ALTERTABLE_SKEWED) - | - KW_NOT storedAsDirs - ->^(TOK_ALTERTABLE_SKEWED storedAsDirs) ; alterStatementSuffixExchangePartition @@ -1992,8 +1981,8 @@ tableSkewed @init { pushMsg("table skewed specification", state); } @after { popMsg(state); } : - KW_SKEWED KW_BY LPAREN skewedCols=columnNameList RPAREN KW_ON LPAREN (skewedValues=skewedValueElement) RPAREN ((storedAsDirs) => storedAsDirs)? - -> ^(TOK_TABLESKEWED $skewedCols $skewedValues storedAsDirs?) 
+ KW_SKEWED KW_BY LPAREN skewedCols=columnNameList RPAREN KW_ON LPAREN (skewedValues=skewedValueElement) RPAREN + -> ^(TOK_TABLESKEWED $skewedCols $skewedValues) ; rowFormat diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index 189975ed2e..3bcd7bda25 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -316,9 +316,6 @@ private void analyzeLoad(ASTNode ast) throws SemanticException { throw new SemanticException(ErrorMsg.LOAD_INTO_NON_NATIVE.getMsg()); } - if(ts.tableHandle.isStoredAsSubDirectories()) { - throw new SemanticException(ErrorMsg.LOAD_INTO_STORED_AS_DIR.getMsg()); - } List parts = ts.tableHandle.getPartitionKeys(); if ((parts != null && parts.size() > 0) && (ts.partSpec == null || ts.partSpec.size() == 0)) { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index e23e4033ff..cc183e9cb7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -7251,9 +7251,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) currentTableId = destTableId; destTableId++; - lbCtx = constructListBucketingCtx(dest_tab.getSkewedColNames(), - dest_tab.getSkewedColValues(), dest_tab.getSkewedColValueLocationMaps(), - dest_tab.isStoredAsSubDirectories(), conf); + lbCtx = constructListBucketingCtx(dest_tab.getSkewedColNames(), dest_tab.getSkewedColValues(), + dest_tab.getSkewedColValueLocationMaps(), conf); // Create the work for moving the table // NOTE: specify Dynamic partitions in dest_tab for WriteEntity @@ -7353,8 +7352,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) destTableId++; lbCtx = constructListBucketingCtx(dest_part.getSkewedColNames(), - dest_part.getSkewedColValues(), dest_part.getSkewedColValueLocationMaps(), - dest_part.isStoredAsSubDirectories(), conf); + dest_part.getSkewedColValues(), dest_part.getSkewedColValueLocationMaps(), conf); AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID; if (destTableIsFullAcid) { acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest); @@ -7750,7 +7748,6 @@ private FileSinkDesc createFileSinkDesc(String dest, TableDesc table_desc, /* Set List Bucketing context. 
*/ if (lbCtx != null) { lbCtx.processRowSkewedIndex(fsRS); - lbCtx.calculateSkewedValueSubDirList(); } fileSinkDesc.setLbCtx(lbCtx); @@ -12910,7 +12907,6 @@ ASTNode analyzeCreateTable( List skewedColNames = new ArrayList(); List> skewedValues = new ArrayList>(); Map, String> listBucketColValuesMapping = new HashMap, String>(); - boolean storedAsDirs = false; boolean isUserStorageFormat = false; RowFormatParams rowFormatParams = new RowFormatParams(); @@ -13040,9 +13036,6 @@ ASTNode analyzeCreateTable( skewedColNames = analyzeSkewedTablDDLColNames(skewedColNames, child); // skewed value analyzeDDLSkewedValues(skewedValues, child); - // stored as directories - storedAsDirs = analyzeStoredAdDirs(child); - break; default: throw new AssertionError("Unknown token: " + child.getToken()); @@ -13121,7 +13114,6 @@ ASTNode analyzeCreateTable( storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); - crtTblDesc.setStoredAsSubDirectories(storedAsDirs); crtTblDesc.setNullFormat(rowFormatParams.nullFormat); crtTblDesc.validate(conf); @@ -13224,7 +13216,6 @@ ASTNode analyzeCreateTable( skewedColNames, skewedValues, true, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); tableDesc.setMaterialization(isMaterialization); - tableDesc.setStoredAsSubDirectories(storedAsDirs); tableDesc.setNullFormat(rowFormatParams.nullFormat); qb.setTableDesc(tableDesc); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java index 512f1ff3da..cd2bfed416 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java @@ -1113,7 +1113,6 @@ private boolean handleCardinalityViolation(StringBuilder rewrittenQueryStr, ASTN table.setDataLocation(Warehouse.getDnsPath(new Path(SessionState.get().getTempTableSpace(), tableName), conf)); table.getTTable().setTemporary(true); - table.setStoredAsSubDirectories(false); table.setInputFormatClass(format.getInputFormat()); table.setOutputFormatClass(format.getOutputFormat()); db.createTable(table, true); diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java index 0b04c0ce85..ba03e88630 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java @@ -121,7 +121,6 @@ ProtectModeType protectModeType; Map, String> skewedLocations; boolean isTurnOffSkewed = false; - boolean isStoredAsSubDirectories = false; List skewedColNames; List> skewedColValues; Table tableForSkewedColValidation; @@ -888,20 +887,6 @@ public void setTable(Table table) { this.tableForSkewedColValidation = table; } - /** - * @return the isStoredAsSubDirectories - */ - public boolean isStoredAsSubDirectories() { - return isStoredAsSubDirectories; - } - - /** - * @param isStoredAsSubDirectories the isStoredAsSubDirectories to set - */ - public void setStoredAsSubDirectories(boolean isStoredAsSubDirectories) { - this.isStoredAsSubDirectories = isStoredAsSubDirectories; - } - /** * @param isDropIfExists the isDropIfExists to set */ diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java 
ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java index 04292787a8..658451efb6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java @@ -92,7 +92,6 @@ boolean ifNotExists; List skewedColNames; List> skewedColValues; - boolean isStoredAsSubDirectories = false; boolean isTemporary = false; private boolean isMaterialization = false; private boolean replaceMode = false; @@ -605,20 +604,6 @@ public void validate(HiveConf conf) this.getSkewedColValues()); } - /** - * @return the isStoredAsSubDirectories - */ - public boolean isStoredAsSubDirectories() { - return isStoredAsSubDirectories; - } - - /** - * @param isStoredAsSubDirectories the isStoredAsSubDirectories to set - */ - public void setStoredAsSubDirectories(boolean isStoredAsSubDirectories) { - this.isStoredAsSubDirectories = isStoredAsSubDirectories; - } - /** * @return the nullFormat */ @@ -806,8 +791,6 @@ public Table toTable(HiveConf conf) throws HiveException { tbl.getTTable().setTemporary(isTemporary()); - tbl.setStoredAsSubDirectories(isStoredAsSubDirectories()); - tbl.setInputFormatClass(getInputFormat()); tbl.setOutputFormatClass(getOutputFormat()); diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java index ef7325fe2c..ed80240c65 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java @@ -85,7 +85,6 @@ public ImportTableDesc(String dbName, Table table) throws Exception { null, null, null); - this.createTblDesc.setStoredAsSubDirectories(table.getSd().isStoredAsSubDirectories()); break; case VIEW: String[] qualViewName = { dbName, table.getTableName() }; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java index 5f8cf54d57..c326b590df 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java @@ -41,14 +41,11 @@ private List> skewedColValues; private Map, String> lbLocationMap; private List rowSkewedIndex; - private boolean isStoredAsSubDirectories; private String defaultKey; private String defaultDirName; - private List skewedValuesDirNames; public ListBucketingCtx() { rowSkewedIndex = new ArrayList(); - skewedValuesDirNames = new ArrayList(); } /** @@ -118,41 +115,6 @@ public void processRowSkewedIndex(RowSchema rowSch) { } } - /** - * Calculate skewed value subdirectory directory which is used in - * FileSinkOperator.java createKeyForStatsPublisher() - * For example, create table test skewed by (key, value) on (('484','val_484') - * stored as DIRECTORIES; - * after the method, skewedValuesDirNames will contain 2 elements: - * key=484/value=val_484 - * HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME/HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME - */ - public void calculateSkewedValueSubDirList() { - if (isSkewedStoredAsDir()) { - for (List value : this.skewedColValues) { - skewedValuesDirNames.add(FileUtils.makeListBucketingDirName(this.skewedColNames, value)); - } - // creat default dir - skewedValuesDirNames.add(FileUtils.makeDefaultListBucketingDirName( - this.skewedColNames, - ListBucketingPrunerUtils.HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME)); - } - } - - /** - * @return the isStoredAsSubDirectories - */ - public boolean isStoredAsSubDirectories() { - return isStoredAsSubDirectories; - } - - /** - * @param 
isStoredAsSubDirectories the isStoredAsSubDirectories to set - */ - public void setStoredAsSubDirectories(boolean isStoredAsSubDirectories) { - this.isStoredAsSubDirectories = isStoredAsSubDirectories; - } - /** * @return the defaultKey */ @@ -181,46 +143,16 @@ public void setDefaultDirName(String defaultDirName) { this.defaultDirName = defaultDirName; } - /** - * check if list bucketing is enabled. - * - * @param ctx - * @return - */ - public boolean isSkewedStoredAsDir() { - return (this.getSkewedColNames() != null) - && (this.getSkewedColNames().size() > 0) - && (this.getSkewedColValues() != null) - && (this.getSkewedColValues().size() > 0) - && (this.isStoredAsSubDirectories()); - } - /** * Calculate list bucketing level. * - * 0: not list bucketing - int: no. of skewed columns + * always 0; list bucketing was removed in HIVE-17852 * * @param ctx * @return */ - public int calculateListBucketingLevel() { - int lbLevel = isSkewedStoredAsDir() ? this.getSkewedColNames().size() : 0; - return lbLevel; - } - - /** - * @return the skewedValuesDirNames - */ - public List getSkewedValuesDirNames() { - return skewedValuesDirNames; - } - - /** - * @param skewedValuesDirNames the skewedValuesDirNames to set - */ - public void setSkewedValuesDirNames(List skewedValuesDirNames) { - this.skewedValuesDirNames = skewedValuesDirNames; + public int calculateListBucketingLevel() { + return 0; } /** diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java index 8bb40abc8d..eb2ba492ad 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java @@ -25,14 +25,11 @@ * create table list_bucketing_static_part (key String, value String) * partitioned by (ds String, hr String) * skewed by (value) on ('val_466','val_287','val_82') - * stored as DIRECTORIES * STORED AS RCFILE; * * Skewed column is "value". * 1. It's position in table column is 1. * 2. It's position in skewed column list is 0.
- * - * This information will be used in {@FileSinkOperator} generateListBucketingDirName */ public class SkewedColumnPositionPair { private int tblColPosition; diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java index 982b180761..4da3c1efcf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java +++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java @@ -44,24 +44,20 @@ import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.StringableMap; import org.apache.hadoop.hive.common.ValidCompactorWriteIdList; -import org.apache.hadoop.hive.common.ValidReaderWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.txn.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnStore; -import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.exec.DDLTask; @@ -75,7 +71,6 @@ import org.apache.hadoop.hive.ql.io.RecordIdentifier; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -445,24 +440,6 @@ private String buildMmCompactionCtQuery( query.append("INTO ").append(t.getSd().getNumBuckets()).append(" BUCKETS"); } - // Stored as directories. We don't care about the skew otherwise. 
- if (t.getSd().isStoredAsSubDirectories()) { - SkewedInfo skewedInfo = t.getSd().getSkewedInfo(); - if (skewedInfo != null && !skewedInfo.getSkewedColNames().isEmpty()) { - query.append(" SKEWED BY (").append( - StringUtils.join(", ", skewedInfo.getSkewedColNames())).append(") ON "); - isFirst = true; - for (List colValues : skewedInfo.getSkewedColValues()) { - if (!isFirst) { - query.append(", "); - } - isFirst = false; - query.append("('").append(StringUtils.join("','", colValues)).append("')"); - } - query.append(") STORED AS DIRECTORIES"); - } - } - SerDeInfo serdeInfo = sd.getSerdeInfo(); Map serdeParams = serdeInfo.getParameters(); query.append(" ROW FORMAT SERDE '").append(HiveStringUtils.escapeHiveCommand( diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java index e108684660..bf366d9e52 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java @@ -142,7 +142,7 @@ db.createTable(src, cols, null, TextInputFormat.class, HiveIgnoreKeyTextOutputFormat.class); db.loadTable(hadoopDataFile[i], src, LoadFileType.KEEP_EXISTING, - true, false, false, false, null, 0, false); + true, false, false, null, 0, false); i++; } diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java index a24b6423ba..bd0a01bcf0 100755 --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java @@ -165,7 +165,6 @@ public void testTable() throws Throwable { tbl.setSerdeParam(serdeConstants.FIELD_DELIM, "1"); tbl.setSerializationLib(LazySimpleSerDe.class.getName()); - tbl.setStoredAsSubDirectories(false); tbl.setRewriteEnabled(false); @@ -226,7 +225,6 @@ public void testThriftTable() throws Throwable { tbl.setSerdeParam(serdeConstants.SERIALIZATION_CLASS, Complex.class.getName()); tbl.setSerdeParam(serdeConstants.SERIALIZATION_FORMAT, TBinaryProtocol.class .getName()); - tbl.setStoredAsSubDirectories(false); tbl.setRewriteEnabled(false); diff --git ql/src/test/queries/clientnegative/load_stored_as_dirs.q ql/src/test/queries/clientnegative/load_stored_as_dirs.q deleted file mode 100644 index 69e23099b9..0000000000 --- ql/src/test/queries/clientnegative/load_stored_as_dirs.q +++ /dev/null @@ -1,5 +0,0 @@ --- Load data can't work with table with stored as directories -CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING) -SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78)) stored as DIRECTORIES; - -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE stored_as_dirs_multiple; diff --git ql/src/test/queries/clientnegative/truncate_column_list_bucketing.q ql/src/test/queries/clientnegative/truncate_column_list_bucketing.q deleted file mode 100644 index 70cbce865a..0000000000 --- ql/src/test/queries/clientnegative/truncate_column_list_bucketing.q +++ /dev/null @@ -1,14 +0,0 @@ ---! 
qt:dataset:src -set mapred.input.dir.recursive=true; - --- Tests truncating a column on which a table is list bucketed - -CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE; - -ALTER TABLE test_tab -SKEWED BY (key) ON ("484") -STORED AS DIRECTORIES; - -INSERT OVERWRITE TABLE test_tab SELECT * FROM src; - -TRUNCATE TABLE test_tab COLUMNS (key); diff --git ql/src/test/queries/clientpositive/create_alter_list_bucketing_table1.q ql/src/test/queries/clientpositive/create_alter_list_bucketing_table1.q deleted file mode 100644 index bf89e8feb7..0000000000 --- ql/src/test/queries/clientpositive/create_alter_list_bucketing_table1.q +++ /dev/null @@ -1,38 +0,0 @@ --- Test stored as directories --- it covers a few cases - --- 1. create a table with stored as directories -CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING) -SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78)) stored as DIRECTORIES; -describe formatted stored_as_dirs_multiple; - --- 2. turn off stored as directories but table is still a skewed table -alter table stored_as_dirs_multiple not stored as DIRECTORIES; -describe formatted stored_as_dirs_multiple; - --- 3. turn off skewed -alter table stored_as_dirs_multiple not skewed; -describe formatted stored_as_dirs_multiple; - --- 4. alter a table to stored as directories -CREATE TABLE stored_as_dirs_single (key STRING, value STRING); -alter table stored_as_dirs_single SKEWED BY (key) ON ('1','5','6') -stored as DIRECTORIES; -describe formatted stored_as_dirs_single; - --- 5. turn off skewed should turn off stored as directories too -alter table stored_as_dirs_single not skewed; -describe formatted stored_as_dirs_single; - --- 6. turn on stored as directories again -alter table stored_as_dirs_single SKEWED BY (key) ON ('1','5','6') -stored as DIRECTORIES; -describe formatted stored_as_dirs_single; - --- 7. 
create table like -create table stored_as_dirs_single_like like stored_as_dirs_single; -describe formatted stored_as_dirs_single_like; - --- cleanup -drop table stored_as_dirs_single; -drop table stored_as_dirs_multiple; diff --git ql/src/test/queries/clientpositive/infer_bucket_sort_list_bucket.q ql/src/test/queries/clientpositive/infer_bucket_sort_list_bucket.q index 547946822a..bcc66b98b2 100644 --- ql/src/test/queries/clientpositive/infer_bucket_sort_list_bucket.q +++ ql/src/test/queries/clientpositive/infer_bucket_sort_list_bucket.q @@ -11,8 +11,7 @@ set mapred.input.dir.recursive=true; -- create a skewed table CREATE TABLE list_bucketing_table (key STRING, value STRING) PARTITIONED BY (part STRING) -SKEWED BY (key) ON ("484") -STORED AS DIRECTORIES; +SKEWED BY (key) ON ("484"); -- Tests group by, the output should neither be bucketed nor sorted @@ -24,8 +23,7 @@ DESC FORMATTED list_bucketing_table PARTITION (part = '1'); -- create a table skewed on a key which doesnt exist in the data CREATE TABLE list_bucketing_table2 (key STRING, value STRING) PARTITIONED BY (part STRING) -SKEWED BY (key) ON ("abc") -STORED AS DIRECTORIES; +SKEWED BY (key) ON ("abc"); -- should not be bucketed or sorted INSERT OVERWRITE TABLE list_bucketing_table2 PARTITION (part = '1') diff --git ql/src/test/queries/clientpositive/lb_fs_stats.q ql/src/test/queries/clientpositive/lb_fs_stats.q index 7cadaf95a3..d2c68ec1e5 100644 --- ql/src/test/queries/clientpositive/lb_fs_stats.q +++ ql/src/test/queries/clientpositive/lb_fs_stats.q @@ -11,7 +11,7 @@ set hive.stats.dbclass=fs; CREATE TABLE test_tab_n0 (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE; -ALTER TABLE test_tab_n0 SKEWED BY (key) ON ("484") STORED AS DIRECTORIES; +ALTER TABLE test_tab_n0 SKEWED BY (key) ON ("484"); INSERT OVERWRITE TABLE test_tab_n0 PARTITION (part = '1') SELECT * FROM src; diff --git ql/src/test/queries/clientpositive/list_bucket_dml_1.q ql/src/test/queries/clientpositive/list_bucket_dml_1.q index 23e303fe5e..c555c6efb5 100644 --- ql/src/test/queries/clientpositive/list_bucket_dml_1.q +++ ql/src/test/queries/clientpositive/list_bucket_dml_1.q @@ -14,9 +14,7 @@ set mapred.input.dir.recursive=true; -- create a skewed table create table list_bucketing_dynamic_part_n0 (key String, value String) partitioned by (ds String, hr String) -skewed by (key) on ("484") -stored as DIRECTORIES -; +skewed by (key) on ("484"); -- list bucketing DML explain extended diff --git ql/src/test/queries/clientpositive/list_bucket_dml_10.q ql/src/test/queries/clientpositive/list_bucket_dml_10.q index 60bad7fac2..f3437c4f67 100644 --- ql/src/test/queries/clientpositive/list_bucket_dml_10.q +++ ql/src/test/queries/clientpositive/list_bucket_dml_10.q @@ -18,7 +18,6 @@ set mapred.input.dir.recursive=true; create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ('484','51','103') - stored as DIRECTORIES STORED AS RCFILE; -- list bucketing DML without merge. use bucketize to generate a few small files. 
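Each of these .q updates makes the same mechanical change to the test DDL. A hedged before/after sketch (the table name lb is illustrative):

-- before this patch (the grammar no longer accepts this form):
--   create table lb (key String, value String)
--   skewed by (key) on ('484') stored as DIRECTORIES;

-- after this patch: skew metadata is kept, but data is never stored as sub-directories
create table lb (key String, value String)
skewed by (key) on ('484');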
diff --git ql/src/test/queries/clientpositive/list_bucket_dml_11.q ql/src/test/queries/clientpositive/list_bucket_dml_11.q index e0acf2aeec..129f178ef3 100644 --- ql/src/test/queries/clientpositive/list_bucket_dml_11.q +++ ql/src/test/queries/clientpositive/list_bucket_dml_11.q @@ -14,7 +14,6 @@ set hive.merge.mapredfiles=false; create table list_bucketing_static_part_n3 (key String, value String) partitioned by (ds String, hr String) skewed by (value) on ('val_466','val_287','val_82') - stored as DIRECTORIES STORED AS RCFILE; -- list bucketing DML without merge. use bucketize to generate a few small files. diff --git ql/src/test/queries/clientpositive/list_bucket_dml_12.q ql/src/test/queries/clientpositive/list_bucket_dml_12.q index d81355aa9b..76b8250fff 100644 --- ql/src/test/queries/clientpositive/list_bucket_dml_12.q +++ ql/src/test/queries/clientpositive/list_bucket_dml_12.q @@ -13,7 +13,6 @@ set hive.merge.mapredfiles=false; create table list_bucketing_mul_col_n0 (col1 String, col2 String, col3 String, col4 String, col5 string) partitioned by (ds String, hr String) skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82')) - stored as DIRECTORIES STORED AS RCFILE; -- list bucketing DML diff --git ql/src/test/queries/clientpositive/list_bucket_dml_13.q ql/src/test/queries/clientpositive/list_bucket_dml_13.q index 091cf0cb58..fcd045f1b3 100644 --- ql/src/test/queries/clientpositive/list_bucket_dml_13.q +++ ql/src/test/queries/clientpositive/list_bucket_dml_13.q @@ -13,7 +13,6 @@ set hive.merge.mapredfiles=false; create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) partitioned by (ds String, hr String) skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82')) - stored as DIRECTORIES STORED AS RCFILE; -- list bucketing DML diff --git ql/src/test/queries/clientpositive/list_bucket_dml_14.q ql/src/test/queries/clientpositive/list_bucket_dml_14.q index a0f9c2c376..f14a033f10 100644 --- ql/src/test/queries/clientpositive/list_bucket_dml_14.q +++ ql/src/test/queries/clientpositive/list_bucket_dml_14.q @@ -12,9 +12,7 @@ set mapred.input.dir.recursive=true; -- create a skewed table create table list_bucketing (key String, value String) -skewed by (key) on ("484") -stored as DIRECTORIES -; +skewed by (key) on ("484"); -- list bucketing DML explain extended diff --git ql/src/test/queries/clientpositive/list_bucket_dml_2.q ql/src/test/queries/clientpositive/list_bucket_dml_2.q index b80e51d013..cc4d5b8a18 100644 --- ql/src/test/queries/clientpositive/list_bucket_dml_2.q +++ ql/src/test/queries/clientpositive/list_bucket_dml_2.q @@ -27,7 +27,6 @@ set hive.stats.reliable=true; create table list_bucketing_static_part_n4 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) - stored as DIRECTORIES STORED AS RCFILE; -- list bucketing DML without merge. use bucketize to generate a few small files. 
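The DML these tests drive against such tables is an ordinary INSERT OVERWRITE; a minimal sketch, reusing the hypothetical skew_demo table and the src dataset the tests read from:

  -- populate one static partition; rows with key '484' are the skewed ones
  INSERT OVERWRITE TABLE skew_demo PARTITION (ds = '2008-04-08', hr = '11')
  SELECT key, value FROM src;

With the directory layout gone, such an insert produces a flat set of files in the partition, which is what the numFiles and totalSize drops in the regenerated .q.out files further below reflect.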
diff --git ql/src/test/queries/clientpositive/list_bucket_dml_3.q ql/src/test/queries/clientpositive/list_bucket_dml_3.q index 08c8ce2ff0..da8282611b 100644 --- ql/src/test/queries/clientpositive/list_bucket_dml_3.q +++ ql/src/test/queries/clientpositive/list_bucket_dml_3.q @@ -12,7 +12,7 @@ set mapred.input.dir.recursive=true; -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) -- create a skewed table -create table list_bucketing_static_part_n1 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES; +create table list_bucketing_static_part_n1 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484"); -- list bucketing DML explain extended diff --git ql/src/test/queries/clientpositive/list_bucket_dml_4.q ql/src/test/queries/clientpositive/list_bucket_dml_4.q index a13915e18b..b2fc1369a3 100644 --- ql/src/test/queries/clientpositive/list_bucket_dml_4.q +++ ql/src/test/queries/clientpositive/list_bucket_dml_4.q @@ -30,7 +30,6 @@ set hive.merge.mapredfiles=false; create table list_bucketing_static_part_n2 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) - stored as DIRECTORIES STORED AS RCFILE; -- list bucketing DML without merge. use bucketize to generate a few small files. diff --git ql/src/test/queries/clientpositive/list_bucket_dml_5.q ql/src/test/queries/clientpositive/list_bucket_dml_5.q index bbfb317d10..4dd676cf5c 100644 --- ql/src/test/queries/clientpositive/list_bucket_dml_5.q +++ ql/src/test/queries/clientpositive/list_bucket_dml_5.q @@ -15,8 +15,7 @@ set mapred.input.dir.recursive=true; -- create a skewed table create table list_bucketing_dynamic_part_n1 (key String, value String) partitioned by (ds String, hr String) -skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) -stored as DIRECTORIES; +skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')); -- list bucketing DML explain extended diff --git ql/src/test/queries/clientpositive/list_bucket_dml_6.q ql/src/test/queries/clientpositive/list_bucket_dml_6.q index b9a526b5ef..a444b79361 100644 --- ql/src/test/queries/clientpositive/list_bucket_dml_6.q +++ ql/src/test/queries/clientpositive/list_bucket_dml_6.q @@ -53,7 +53,6 @@ set hive.merge.mapredfiles=false; create table list_bucketing_dynamic_part_n3 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) - stored as DIRECTORIES STORED AS RCFILE; -- list bucketing DML without merge. use bucketize to generate a few small files. diff --git ql/src/test/queries/clientpositive/list_bucket_dml_7.q ql/src/test/queries/clientpositive/list_bucket_dml_7.q index 2c96407259..e83ecfdb33 100644 --- ql/src/test/queries/clientpositive/list_bucket_dml_7.q +++ ql/src/test/queries/clientpositive/list_bucket_dml_7.q @@ -28,7 +28,6 @@ set hive.merge.rcfile.block.level=true; CREATE TABLE list_bucketing_dynamic_part (key String, value STRING) PARTITIONED BY (ds string, hr string) skewed by (key) on ('484') - stored as DIRECTORIES STORED AS RCFILE; -- list bucketing DML without merge. use bucketize to generate a few small files. 
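Skew metadata can also be attached to, or removed from, an existing table; a short sketch of the ALTER forms that lb_fs_stats.q (above) and truncate_column_list_bucket.q (below) use, again on the hypothetical skew_demo table:

  ALTER TABLE skew_demo SKEWED BY (key) ON ('484');  -- declare the skewed values
  ALTER TABLE skew_demo NOT SKEWED;                  -- drop the skew metadata again

The companion NOT STORED AS DIRECTORIES form, exercised by the deleted create_alter_list_bucketing_table1.q test, goes away together with the feature itself.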
diff --git ql/src/test/queries/clientpositive/list_bucket_dml_8.q ql/src/test/queries/clientpositive/list_bucket_dml_8.q index 87f2624d4a..c78e30e8fb 100644 --- ql/src/test/queries/clientpositive/list_bucket_dml_8.q +++ ql/src/test/queries/clientpositive/list_bucket_dml_8.q @@ -54,7 +54,6 @@ set hive.merge.mapredfiles=false; create table list_bucketing_dynamic_part_n2 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) - stored as DIRECTORIES STORED AS RCFILE; -- list bucketing DML without merge. use bucketize to generate a few small files. diff --git ql/src/test/queries/clientpositive/list_bucket_dml_9.q ql/src/test/queries/clientpositive/list_bucket_dml_9.q index e130f05e82..b35e370b8d 100644 --- ql/src/test/queries/clientpositive/list_bucket_dml_9.q +++ ql/src/test/queries/clientpositive/list_bucket_dml_9.q @@ -30,7 +30,6 @@ set hive.merge.mapredfiles=false; create table list_bucketing_static_part_n0 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ('484','103') - stored as DIRECTORIES STORED AS RCFILE; -- list bucketing DML without merge. use bucketize to generate a few small files. diff --git ql/src/test/queries/clientpositive/list_bucket_query_multiskew_1.q ql/src/test/queries/clientpositive/list_bucket_query_multiskew_1.q index 9e377d5763..988cc2c382 100644 --- ql/src/test/queries/clientpositive/list_bucket_query_multiskew_1.q +++ ql/src/test/queries/clientpositive/list_bucket_query_multiskew_1.q @@ -22,8 +22,7 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -- create a skewed table create table fact_daily (key String, value String) partitioned by (ds String, hr String) -skewed by (key, value) on (('484','val_484'),('238','val_238')) -stored as DIRECTORIES; +skewed by (key, value) on (('484','val_484'),('238','val_238')); insert overwrite table fact_daily partition (ds = '1', hr = '4') select key, value from src; diff --git ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q index a5f5522a46..6343100ef8 100644 --- ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q +++ ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q @@ -20,8 +20,7 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -- create a skewed table create table fact_daily_n2 (key String, value String) partitioned by (ds String, hr String) -skewed by (key, value) on (('484','val_484'),('238','val_238')) -stored as DIRECTORIES; +skewed by (key, value) on (('484','val_484'),('238','val_238')); insert overwrite table fact_daily_n2 partition (ds = '1', hr = '4') select key, value from src; diff --git ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q index 4020063aea..569a5b8ee2 100644 --- ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q +++ ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q @@ -28,13 +28,13 @@ select key, value from src; describe formatted fact_daily_n3 PARTITION (ds = '1', hr='1'); -- partition. 
skewed value is 484/238 -alter table fact_daily_n3 skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES; +alter table fact_daily_n3 skewed by (key, value) on (('484','val_484'),('238','val_238')); insert overwrite table fact_daily_n3 partition (ds = '1', hr = '2') select key, value from src; describe formatted fact_daily_n3 PARTITION (ds = '1', hr='2'); -- another partition. skewed value is 327 -alter table fact_daily_n3 skewed by (key, value) on (('327','val_327')) stored as DIRECTORIES; +alter table fact_daily_n3 skewed by (key, value) on (('327','val_327')); insert overwrite table fact_daily_n3 partition (ds = '1', hr = '3') select key, value from src; describe formatted fact_daily_n3 PARTITION (ds = '1', hr='3'); diff --git ql/src/test/queries/clientpositive/mm_all.q ql/src/test/queries/clientpositive/mm_all.q index 61dd3e7475..115f0c7444 100644 --- ql/src/test/queries/clientpositive/mm_all.q +++ ql/src/test/queries/clientpositive/mm_all.q @@ -124,7 +124,7 @@ drop table partunion_mm; set mapreduce.input.fileinputformat.input.dir.recursive=true; create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) - stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only"); +tblproperties ("transactional"="true", "transactional_properties"="insert_only"); insert into table skew_mm select key, key, key from intermediate_n0; @@ -134,7 +134,7 @@ drop table skew_mm; create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) -skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only"); +skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) tblproperties ("transactional"="true", "transactional_properties"="insert_only"); insert into table skew_dp_union_mm partition (k3) select key as i, key as j, key as k, key as l from intermediate_n0 diff --git ql/src/test/queries/clientpositive/show_create_table_db_table.q ql/src/test/queries/clientpositive/show_create_table_db_table.q index 35ac5bf73e..f98ea8abc9 100644 --- ql/src/test/queries/clientpositive/show_create_table_db_table.q +++ ql/src/test/queries/clientpositive/show_create_table_db_table.q @@ -4,7 +4,7 @@ CREATE DATABASE tmp_feng comment 'for show create table test'; SHOW DATABASES; CREATE TABLE tmp_feng.tmp_showcrt1(key string, value int); CREATE TABLE tmp_feng.tmp_showcrt2(key string, value int) skewed by (key) on ('1','2'); -CREATE TABLE tmp_feng.tmp_showcrt3(key string, value int) skewed by (key) on ('1','2') stored as directories; +CREATE TABLE tmp_feng.tmp_showcrt3(key string, value int) skewed by (key) on ('1','2'); USE default; SHOW CREATE TABLE tmp_feng.tmp_showcrt1; SHOW CREATE TABLE tmp_feng.tmp_showcrt2; diff --git ql/src/test/queries/clientpositive/stats_list_bucket.q ql/src/test/queries/clientpositive/stats_list_bucket.q index c4339d03ce..767ab78d61 100644 --- ql/src/test/queries/clientpositive/stats_list_bucket.q +++ ql/src/test/queries/clientpositive/stats_list_bucket.q @@ -10,7 +10,6 @@ create table stats_list_bucket ( c2 string ) partitioned by (ds string, hr string) skewed by (c1, c2) on (('466','val_466'),('287','val_287'),('82','val_82')) -stored as directories stored as rcfile; @@ -30,7 +29,6 @@ create table stats_list_bucket_1 ( c2 string ) skewed by (c1, c2) on (('466','val_466'),('287','val_287'),('82','val_82')) -stored as directories stored as rcfile; insert overwrite table stats_list_bucket_1 diff --git 
ql/src/test/queries/clientpositive/truncate_column_list_bucket.q ql/src/test/queries/clientpositive/truncate_column_list_bucket.q index f7498aaba5..2fd29b554b 100644 --- ql/src/test/queries/clientpositive/truncate_column_list_bucket.q +++ ql/src/test/queries/clientpositive/truncate_column_list_bucket.q @@ -13,8 +13,7 @@ set mapred.input.dir.recursive=true; CREATE TABLE test_tab_n3 (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE; ALTER TABLE test_tab_n3 -SKEWED BY (key) ON ("484") -STORED AS DIRECTORIES; +SKEWED BY (key) ON ("484"); INSERT OVERWRITE TABLE test_tab_n3 PARTITION (part = '1') SELECT * FROM src; diff --git ql/src/test/results/clientnegative/load_stored_as_dirs.q.out ql/src/test/results/clientnegative/load_stored_as_dirs.q.out deleted file mode 100644 index bcc50e59b8..0000000000 --- ql/src/test/results/clientnegative/load_stored_as_dirs.q.out +++ /dev/null @@ -1,11 +0,0 @@ -PREHOOK: query: CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING) -SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78)) stored as DIRECTORIES -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@stored_as_dirs_multiple -POSTHOOK: query: CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING) -SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78)) stored as DIRECTORIES -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@stored_as_dirs_multiple -FAILED: SemanticException [Error 10195]: A stored-as-directories table cannot be used as target for LOAD diff --git ql/src/test/results/clientnegative/truncate_column_list_bucketing.q.out ql/src/test/results/clientnegative/truncate_column_list_bucketing.q.out deleted file mode 100644 index 3b6ddde221..0000000000 --- ql/src/test/results/clientnegative/truncate_column_list_bucketing.q.out +++ /dev/null @@ -1,31 +0,0 @@ -PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@test_tab -POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_tab -PREHOOK: query: ALTER TABLE test_tab -SKEWED BY (key) ON ("484") -STORED AS DIRECTORIES -PREHOOK: type: ALTERTABLE_SKEWED -PREHOOK: Input: default@test_tab -PREHOOK: Output: default@test_tab -POSTHOOK: query: ALTER TABLE test_tab -SKEWED BY (key) ON ("484") -STORED AS DIRECTORIES -POSTHOOK: type: ALTERTABLE_SKEWED -POSTHOOK: Input: default@test_tab -POSTHOOK: Output: default@test_tab -PREHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@test_tab -POSTHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_tab -POSTHOOK: Lineage: test_tab.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_tab.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: A column on which a partition/table is list bucketed cannot be truncated. 
key diff --git ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out deleted file mode 100644 index 593ae83899..0000000000 --- ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out +++ /dev/null @@ -1,352 +0,0 @@ -PREHOOK: query: CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING) -SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78)) stored as DIRECTORIES -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@stored_as_dirs_multiple -POSTHOOK: query: CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING) -SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78)) stored as DIRECTORIES -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@stored_as_dirs_multiple -PREHOOK: query: describe formatted stored_as_dirs_multiple -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@stored_as_dirs_multiple -POSTHOOK: query: describe formatted stored_as_dirs_multiple -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@stored_as_dirs_multiple -# col_name data_type comment -col1 string -col2 int -col3 string - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"true\",\"col2\":\"true\",\"col3\":\"true\"}} - bucketing_version 2 - numFiles 0 - numRows 0 - rawDataSize 0 - totalSize 0 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -InputFormat: org.apache.hadoop.mapred.TextInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Stored As SubDirectories: Yes -Skewed Columns: [col1, col2] -Skewed Values: [[s1, 1], [s13, 13], [s3, 3], [s78, 78]] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: alter table stored_as_dirs_multiple not stored as DIRECTORIES -PREHOOK: type: ALTERTABLE_SKEWED -PREHOOK: Input: default@stored_as_dirs_multiple -PREHOOK: Output: default@stored_as_dirs_multiple -POSTHOOK: query: alter table stored_as_dirs_multiple not stored as DIRECTORIES -POSTHOOK: type: ALTERTABLE_SKEWED -POSTHOOK: Input: default@stored_as_dirs_multiple -POSTHOOK: Output: default@stored_as_dirs_multiple -PREHOOK: query: describe formatted stored_as_dirs_multiple -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@stored_as_dirs_multiple -POSTHOOK: query: describe formatted stored_as_dirs_multiple -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@stored_as_dirs_multiple -# col_name data_type comment -col1 string -col2 int -col3 string - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"true\",\"col2\":\"true\",\"col3\":\"true\"}} - bucketing_version 2 -#### A masked pattern was here #### - numFiles 0 - numRows 0 - rawDataSize 0 - totalSize 0 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -InputFormat: org.apache.hadoop.mapred.TextInputFormat 
-OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Skewed Columns: [col1, col2] -Skewed Values: [[s1, 1], [s13, 13], [s3, 3], [s78, 78]] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: alter table stored_as_dirs_multiple not skewed -PREHOOK: type: ALTERTABLE_SKEWED -PREHOOK: Input: default@stored_as_dirs_multiple -PREHOOK: Output: default@stored_as_dirs_multiple -POSTHOOK: query: alter table stored_as_dirs_multiple not skewed -POSTHOOK: type: ALTERTABLE_SKEWED -POSTHOOK: Input: default@stored_as_dirs_multiple -POSTHOOK: Output: default@stored_as_dirs_multiple -PREHOOK: query: describe formatted stored_as_dirs_multiple -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@stored_as_dirs_multiple -POSTHOOK: query: describe formatted stored_as_dirs_multiple -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@stored_as_dirs_multiple -# col_name data_type comment -col1 string -col2 int -col3 string - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"true\",\"col2\":\"true\",\"col3\":\"true\"}} - bucketing_version 2 -#### A masked pattern was here #### - numFiles 0 - numRows 0 - rawDataSize 0 - totalSize 0 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -InputFormat: org.apache.hadoop.mapred.TextInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: CREATE TABLE stored_as_dirs_single (key STRING, value STRING) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@stored_as_dirs_single -POSTHOOK: query: CREATE TABLE stored_as_dirs_single (key STRING, value STRING) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@stored_as_dirs_single -PREHOOK: query: alter table stored_as_dirs_single SKEWED BY (key) ON ('1','5','6') -stored as DIRECTORIES -PREHOOK: type: ALTERTABLE_SKEWED -PREHOOK: Input: default@stored_as_dirs_single -PREHOOK: Output: default@stored_as_dirs_single -POSTHOOK: query: alter table stored_as_dirs_single SKEWED BY (key) ON ('1','5','6') -stored as DIRECTORIES -POSTHOOK: type: ALTERTABLE_SKEWED -POSTHOOK: Input: default@stored_as_dirs_single -POSTHOOK: Output: default@stored_as_dirs_single -PREHOOK: query: describe formatted stored_as_dirs_single -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@stored_as_dirs_single -POSTHOOK: query: describe formatted stored_as_dirs_single -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@stored_as_dirs_single -# col_name data_type comment -key string -value string - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} - bucketing_version 2 -#### A masked pattern was here #### - numFiles 0 - numRows 0 - rawDataSize 0 - totalSize 0 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -InputFormat: 
org.apache.hadoop.mapred.TextInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Stored As SubDirectories: Yes -Skewed Columns: [key] -Skewed Values: [[1], [5], [6]] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: alter table stored_as_dirs_single not skewed -PREHOOK: type: ALTERTABLE_SKEWED -PREHOOK: Input: default@stored_as_dirs_single -PREHOOK: Output: default@stored_as_dirs_single -POSTHOOK: query: alter table stored_as_dirs_single not skewed -POSTHOOK: type: ALTERTABLE_SKEWED -POSTHOOK: Input: default@stored_as_dirs_single -POSTHOOK: Output: default@stored_as_dirs_single -PREHOOK: query: describe formatted stored_as_dirs_single -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@stored_as_dirs_single -POSTHOOK: query: describe formatted stored_as_dirs_single -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@stored_as_dirs_single -# col_name data_type comment -key string -value string - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} - bucketing_version 2 -#### A masked pattern was here #### - numFiles 0 - numRows 0 - rawDataSize 0 - totalSize 0 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -InputFormat: org.apache.hadoop.mapred.TextInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: alter table stored_as_dirs_single SKEWED BY (key) ON ('1','5','6') -stored as DIRECTORIES -PREHOOK: type: ALTERTABLE_SKEWED -PREHOOK: Input: default@stored_as_dirs_single -PREHOOK: Output: default@stored_as_dirs_single -POSTHOOK: query: alter table stored_as_dirs_single SKEWED BY (key) ON ('1','5','6') -stored as DIRECTORIES -POSTHOOK: type: ALTERTABLE_SKEWED -POSTHOOK: Input: default@stored_as_dirs_single -POSTHOOK: Output: default@stored_as_dirs_single -PREHOOK: query: describe formatted stored_as_dirs_single -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@stored_as_dirs_single -POSTHOOK: query: describe formatted stored_as_dirs_single -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@stored_as_dirs_single -# col_name data_type comment -key string -value string - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} - bucketing_version 2 -#### A masked pattern was here #### - numFiles 0 - numRows 0 - rawDataSize 0 - totalSize 0 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -InputFormat: org.apache.hadoop.mapred.TextInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Stored As SubDirectories: Yes -Skewed Columns: [key] -Skewed Values: [[1], [5], [6]] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: create table stored_as_dirs_single_like like stored_as_dirs_single 
-PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@stored_as_dirs_single_like -POSTHOOK: query: create table stored_as_dirs_single_like like stored_as_dirs_single -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@stored_as_dirs_single_like -PREHOOK: query: describe formatted stored_as_dirs_single_like -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@stored_as_dirs_single_like -POSTHOOK: query: describe formatted stored_as_dirs_single_like -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@stored_as_dirs_single_like -# col_name data_type comment -key string -value string - -# Detailed Table Information -Database: default -#### A masked pattern was here #### -Retention: 0 -#### A masked pattern was here #### -Table Type: MANAGED_TABLE -Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} - numFiles 0 - numRows 0 - rawDataSize 0 - totalSize 0 -#### A masked pattern was here #### - -# Storage Information -SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -InputFormat: org.apache.hadoop.mapred.TextInputFormat -OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat -Compressed: No -Num Buckets: -1 -Bucket Columns: [] -Sort Columns: [] -Stored As SubDirectories: Yes -Skewed Columns: [key] -Skewed Values: [[1], [5], [6]] -Storage Desc Params: - serialization.format 1 -PREHOOK: query: drop table stored_as_dirs_single -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@stored_as_dirs_single -PREHOOK: Output: default@stored_as_dirs_single -POSTHOOK: query: drop table stored_as_dirs_single -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@stored_as_dirs_single -POSTHOOK: Output: default@stored_as_dirs_single -PREHOOK: query: drop table stored_as_dirs_multiple -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@stored_as_dirs_multiple -PREHOOK: Output: default@stored_as_dirs_multiple -POSTHOOK: query: drop table stored_as_dirs_multiple -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@stored_as_dirs_multiple -POSTHOOK: Output: default@stored_as_dirs_multiple diff --git ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out index c701de9347..7cf9943263 100644 --- ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out +++ ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out @@ -1,14 +1,12 @@ PREHOOK: query: CREATE TABLE list_bucketing_table (key STRING, value STRING) PARTITIONED BY (part STRING) SKEWED BY (key) ON ("484") -STORED AS DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_table POSTHOOK: query: CREATE TABLE list_bucketing_table (key STRING, value STRING) PARTITIONED BY (part STRING) SKEWED BY (key) ON ("484") -STORED AS DIRECTORIES POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_table @@ -45,7 +43,7 @@ Table: list_bucketing_table #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} - numFiles 2 + numFiles 1 numRows 309 rawDataSize 1482 totalSize 1791 @@ -59,24 +57,19 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] -Stored As SubDirectories: Yes Skewed Columns: [key] Skewed Values: [[484]] -#### A masked pattern was here #### -Skewed Value to 
Truncated Path: {[484]=/list_bucketing_table/part=1/key=484} Storage Desc Params: serialization.format 1 PREHOOK: query: CREATE TABLE list_bucketing_table2 (key STRING, value STRING) PARTITIONED BY (part STRING) SKEWED BY (key) ON ("abc") -STORED AS DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_table2 POSTHOOK: query: CREATE TABLE list_bucketing_table2 (key STRING, value STRING) PARTITIONED BY (part STRING) SKEWED BY (key) ON ("abc") -STORED AS DIRECTORIES POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_table2 @@ -127,7 +120,6 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] -Stored As SubDirectories: Yes Skewed Columns: [key] Skewed Values: [[abc]] Storage Desc Params: diff --git ql/src/test/results/clientpositive/lb_fs_stats.q.out ql/src/test/results/clientpositive/lb_fs_stats.q.out index ac4f7b55b4..8738765826 100644 --- ql/src/test/results/clientpositive/lb_fs_stats.q.out +++ ql/src/test/results/clientpositive/lb_fs_stats.q.out @@ -6,11 +6,11 @@ POSTHOOK: query: CREATE TABLE test_tab_n0 (key STRING, value STRING) PARTITIONED POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_tab_n0 -PREHOOK: query: ALTER TABLE test_tab_n0 SKEWED BY (key) ON ("484") STORED AS DIRECTORIES +PREHOOK: query: ALTER TABLE test_tab_n0 SKEWED BY (key) ON ("484") PREHOOK: type: ALTERTABLE_SKEWED PREHOOK: Input: default@test_tab_n0 PREHOOK: Output: default@test_tab_n0 -POSTHOOK: query: ALTER TABLE test_tab_n0 SKEWED BY (key) ON ("484") STORED AS DIRECTORIES +POSTHOOK: query: ALTER TABLE test_tab_n0 SKEWED BY (key) ON ("484") POSTHOOK: type: ALTERTABLE_SKEWED POSTHOOK: Input: default@test_tab_n0 POSTHOOK: Output: default@test_tab_n0 @@ -45,10 +45,10 @@ Table: test_tab_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} - numFiles 2 + numFiles 1 numRows 500 rawDataSize 4812 - totalSize 5370 + totalSize 5293 #### A masked pattern was here #### # Storage Information @@ -59,10 +59,7 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] -Stored As SubDirectories: Yes Skewed Columns: [key] Skewed Values: [[484]] -#### A masked pattern was here #### -Skewed Value to Truncated Path: {[484]=/test_tab_n0/part=1/key=484} Storage Desc Params: serialization.format 1 diff --git ql/src/test/results/clientpositive/list_bucket_dml_1.q.out ql/src/test/results/clientpositive/list_bucket_dml_1.q.out index d2308bb06f..aa4896facd 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_1.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_1.q.out @@ -1,14 +1,12 @@ PREHOOK: query: create table list_bucketing_dynamic_part_n0 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") -stored as DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_dynamic_part_n0 POSTHOOK: query: create table list_bucketing_dynamic_part_n0 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") -stored as DIRECTORIES POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_dynamic_part_n0 @@ -302,7 +300,7 @@ Table: list_bucketing_dynamic_part_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE 
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} - numFiles 2 + numFiles 1 numRows 500 rawDataSize 5312 totalSize 5812 @@ -316,11 +314,8 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] -Stored As SubDirectories: Yes Skewed Columns: [key] Skewed Values: [[484]] -#### A masked pattern was here #### -Skewed Value to Truncated Path: {[484]=/list_bucketing_dynamic_part_n0/ds=2008-04-08/hr=11/key=484} Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted list_bucketing_dynamic_part_n0 partition (ds='2008-04-08', hr='12') @@ -345,7 +340,7 @@ Table: list_bucketing_dynamic_part_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} - numFiles 2 + numFiles 1 numRows 500 rawDataSize 5312 totalSize 5812 @@ -359,11 +354,8 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] -Stored As SubDirectories: Yes Skewed Columns: [key] Skewed Values: [[484]] -#### A masked pattern was here #### -Skewed Value to Truncated Path: {[484]=/list_bucketing_dynamic_part_n0/ds=2008-04-08/hr=12/key=484} Storage Desc Params: serialization.format 1 PREHOOK: query: select count(1) from srcpart where ds='2008-04-08' @@ -424,7 +416,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.list_bucketing_dynamic_part_n0 - numFiles 2 + numFiles 1 numRows 500 partition_columns ds/hr partition_columns.types string:string diff --git ql/src/test/results/clientpositive/list_bucket_dml_11.q.out ql/src/test/results/clientpositive/list_bucket_dml_11.q.out index 550d378ac0..bb67c15702 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_11.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_11.q.out @@ -1,7 +1,6 @@ PREHOOK: query: create table list_bucketing_static_part_n3 (key String, value String) partitioned by (ds String, hr String) skewed by (value) on ('val_466','val_287','val_82') - stored as DIRECTORIES STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -9,7 +8,6 @@ PREHOOK: Output: default@list_bucketing_static_part_n3 POSTHOOK: query: create table list_bucketing_static_part_n3 (key String, value String) partitioned by (ds String, hr String) skewed by (value) on ('val_466','val_287','val_82') - stored as DIRECTORIES STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -257,10 +255,10 @@ Table: list_bucketing_static_part_n3 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} - numFiles 4 + numFiles 1 numRows 500 rawDataSize 4812 - totalSize 5522 + totalSize 5293 #### A masked pattern was here #### # Storage Information @@ -271,11 +269,8 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] -Stored As SubDirectories: Yes Skewed Columns: [value] Skewed Values: [[val_287], [val_466], [val_82]] -#### A masked pattern was here #### -Skewed Value to Truncated Path: {[val_287]=/list_bucketing_static_part_n3/ds=2008-04-08/hr=11/value=val_287, [val_466]=/list_bucketing_static_part_n3/ds=2008-04-08/hr=11/value=val_466, [val_82]=/list_bucketing_static_part_n3/ds=2008-04-08/hr=11/value=val_82} Storage Desc Params: serialization.format 1 PREHOOK: query: explain extended @@ -307,7 +302,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.list_bucketing_static_part_n3 
- numFiles 4 + numFiles 1 numRows 500 partition_columns ds/hr partition_columns.types string:string @@ -315,7 +310,7 @@ STAGE PLANS: serialization.ddl struct list_bucketing_static_part_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - totalSize 5522 + totalSize 5293 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe diff --git ql/src/test/results/clientpositive/list_bucket_dml_12.q.out ql/src/test/results/clientpositive/list_bucket_dml_12.q.out index 07b07fb094..0411a08473 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_12.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_12.q.out @@ -1,7 +1,6 @@ PREHOOK: query: create table list_bucketing_mul_col_n0 (col1 String, col2 String, col3 String, col4 String, col5 string) partitioned by (ds String, hr String) skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82')) - stored as DIRECTORIES STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -9,7 +8,6 @@ PREHOOK: Output: default@list_bucketing_mul_col_n0 POSTHOOK: query: create table list_bucketing_mul_col_n0 (col1 String, col2 String, col3 String, col4 String, col5 string) partitioned by (ds String, hr String) skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82')) - stored as DIRECTORIES STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -263,10 +261,10 @@ Table: list_bucketing_mul_col_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"true\",\"col2\":\"true\",\"col3\":\"true\",\"col4\":\"true\",\"col5\":\"true\"}} - numFiles 4 + numFiles 1 numRows 500 rawDataSize 6312 - totalSize 7094 + totalSize 6826 #### A masked pattern was here #### # Storage Information @@ -277,11 +275,8 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] -Stored As SubDirectories: Yes Skewed Columns: [col2, col4] Skewed Values: [[287, val_287], [466, val_466], [82, val_82]] -#### A masked pattern was here #### -Skewed Value to Truncated Path: {[287, val_287]=/list_bucketing_mul_col_n0/ds=2008-04-08/hr=11/col2=287/col4=val_287, [466, val_466]=/list_bucketing_mul_col_n0/ds=2008-04-08/hr=11/col2=466/col4=val_466, [82, val_82]=/list_bucketing_mul_col_n0/ds=2008-04-08/hr=11/col2=82/col4=val_82} Storage Desc Params: serialization.format 1 PREHOOK: query: explain extended @@ -315,7 +310,7 @@ STAGE PLANS: columns.types string:string:string:string:string #### A masked pattern was here #### name default.list_bucketing_mul_col_n0 - numFiles 4 + numFiles 1 numRows 500 partition_columns ds/hr partition_columns.types string:string @@ -323,7 +318,7 @@ STAGE PLANS: serialization.ddl struct list_bucketing_mul_col_n0 { string col1, string col2, string col3, string col4, string col5} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - totalSize 7094 + totalSize 6826 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe @@ -408,7 +403,7 @@ STAGE PLANS: columns.types string:string:string:string:string #### A masked pattern was here #### name default.list_bucketing_mul_col_n0 - numFiles 4 + numFiles 1 numRows 500 partition_columns ds/hr partition_columns.types string:string @@ -416,7 +411,7 @@ STAGE PLANS: serialization.ddl struct list_bucketing_mul_col_n0 { string col1, string col2, string col3, string 
col4, string col5} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - totalSize 7094 + totalSize 6826 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe diff --git ql/src/test/results/clientpositive/list_bucket_dml_13.q.out ql/src/test/results/clientpositive/list_bucket_dml_13.q.out index 4d1224bad6..cf9cebe24c 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_13.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_13.q.out @@ -1,7 +1,6 @@ PREHOOK: query: create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) partitioned by (ds String, hr String) skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82')) - stored as DIRECTORIES STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -9,7 +8,6 @@ PREHOOK: Output: default@list_bucketing_mul_col POSTHOOK: query: create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) partitioned by (ds String, hr String) skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82')) - stored as DIRECTORIES STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -263,10 +261,10 @@ Table: list_bucketing_mul_col #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"true\",\"col2\":\"true\",\"col3\":\"true\",\"col4\":\"true\",\"col5\":\"true\"}} - numFiles 4 + numFiles 1 numRows 500 rawDataSize 6312 - totalSize 7094 + totalSize 6826 #### A masked pattern was here #### # Storage Information @@ -277,11 +275,8 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] -Stored As SubDirectories: Yes Skewed Columns: [col2, col4] Skewed Values: [[287, val_287], [466, val_466], [82, val_82]] -#### A masked pattern was here #### -Skewed Value to Truncated Path: {[287, val_287]=/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=287/col4=val_287, [466, val_466]=/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=466/col4=val_466, [82, val_82]=/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=82/col4=val_82} Storage Desc Params: serialization.format 1 PREHOOK: query: explain extended @@ -315,7 +310,7 @@ STAGE PLANS: columns.types string:string:string:string:string #### A masked pattern was here #### name default.list_bucketing_mul_col - numFiles 4 + numFiles 1 numRows 500 partition_columns ds/hr partition_columns.types string:string @@ -323,7 +318,7 @@ STAGE PLANS: serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - totalSize 7094 + totalSize 6826 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe diff --git ql/src/test/results/clientpositive/list_bucket_dml_14.q.out ql/src/test/results/clientpositive/list_bucket_dml_14.q.out index ed23b0d8b3..a37e58d2aa 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_14.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_14.q.out @@ -1,12 +1,10 @@ PREHOOK: query: create table list_bucketing (key String, value String) skewed by (key) on ("484") -stored as DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing POSTHOOK: query: 
create table list_bucketing (key String, value String) skewed by (key) on ("484") -stored as DIRECTORIES POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing @@ -233,7 +231,7 @@ Table Type: MANAGED_TABLE Table Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} bucketing_version 2 - numFiles 2 + numFiles 1 numRows 500 rawDataSize 5312 totalSize 5812 @@ -247,11 +245,8 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] -Stored As SubDirectories: Yes Skewed Columns: [key] Skewed Values: [[484]] -#### A masked pattern was here #### -Skewed Value to Truncated Path: {[484]=/list_bucketing/key=484} Storage Desc Params: serialization.format 1 PREHOOK: query: select count(1) from src @@ -348,7 +343,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.list_bucketing - numFiles 2 + numFiles 1 numRows 500 rawDataSize 5312 serialization.ddl struct list_bucketing { string key, string value} @@ -370,7 +365,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.list_bucketing - numFiles 2 + numFiles 1 numRows 500 rawDataSize 5312 serialization.ddl struct list_bucketing { string key, string value} diff --git ql/src/test/results/clientpositive/list_bucket_dml_2.q.out ql/src/test/results/clientpositive/list_bucket_dml_2.q.out index ea621b8e67..c061d73e36 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_2.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_2.q.out @@ -1,7 +1,6 @@ PREHOOK: query: create table list_bucketing_static_part_n4 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) - stored as DIRECTORIES STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -9,7 +8,6 @@ PREHOOK: Output: default@list_bucketing_static_part_n4 POSTHOOK: query: create table list_bucketing_static_part_n4 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) - stored as DIRECTORIES STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -312,10 +310,10 @@ Table: list_bucketing_static_part_n4 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} - numFiles 6 + numFiles 2 numRows 1000 rawDataSize 9624 - totalSize 10898 + totalSize 10586 #### A masked pattern was here #### # Storage Information @@ -326,11 +324,8 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] -Stored As SubDirectories: Yes Skewed Columns: [key, value] Skewed Values: [[103, val_103], [484, val_484], [51, val_14]] -#### A masked pattern was here #### -Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_static_part_n4/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_static_part_n4/ds=2008-04-08/hr=11/key=484/value=val_484} Storage Desc Params: serialization.format 1 PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' @@ -380,7 +375,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.list_bucketing_static_part_n4 - numFiles 6 + numFiles 2 numRows 1000 partition_columns ds/hr partition_columns.types string:string @@ -388,7 +383,7 @@ STAGE PLANS: serialization.ddl struct 
list_bucketing_static_part_n4 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - totalSize 10898 + totalSize 10586 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe diff --git ql/src/test/results/clientpositive/list_bucket_dml_3.q.out ql/src/test/results/clientpositive/list_bucket_dml_3.q.out index 3a6b27be2e..08dbcca626 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_3.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_3.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table list_bucketing_static_part_n1 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES +PREHOOK: query: create table list_bucketing_static_part_n1 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_static_part_n1 -POSTHOOK: query: create table list_bucketing_static_part_n1 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES +POSTHOOK: query: create table list_bucketing_static_part_n1 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_static_part_n1 @@ -293,7 +293,7 @@ Table: list_bucketing_static_part_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} - numFiles 4 + numFiles 2 numRows 1000 rawDataSize 10624 totalSize 11624 @@ -307,11 +307,8 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] -Stored As SubDirectories: Yes Skewed Columns: [key] Skewed Values: [[484]] -#### A masked pattern was here #### -Skewed Value to Truncated Path: {[484]=/list_bucketing_static_part_n1/ds=2008-04-08/hr=11/key=484} Storage Desc Params: serialization.format 1 PREHOOK: query: select count(1) from srcpart where ds='2008-04-08' @@ -372,7 +369,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.list_bucketing_static_part_n1 - numFiles 4 + numFiles 2 numRows 1000 partition_columns ds/hr partition_columns.types string:string diff --git ql/src/test/results/clientpositive/list_bucket_dml_4.q.out ql/src/test/results/clientpositive/list_bucket_dml_4.q.out index 823e9e1135..ad65583caa 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_4.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_4.q.out @@ -1,7 +1,6 @@ PREHOOK: query: create table list_bucketing_static_part_n2 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) - stored as DIRECTORIES STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -9,7 +8,6 @@ PREHOOK: Output: default@list_bucketing_static_part_n2 POSTHOOK: query: create table list_bucketing_static_part_n2 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) - stored as DIRECTORIES STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -312,10 +310,10 @@ Table: list_bucketing_static_part_n2 #### A masked pattern was here #### Partition Parameters: 
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 6
+ numFiles 2
numRows 1000
rawDataSize 9624
- totalSize 10898
+ totalSize 10586
#### A masked pattern was here ####
# Storage Information
@@ -326,11 +324,8 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key, value]
Skewed Values: [[103, val_103], [484, val_484], [51, val_14]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_static_part_n2/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_static_part_n2/ds=2008-04-08/hr=11/key=484/value=val_484}
Storage Desc Params:
serialization.format 1
PREHOOK: query: explain extended
@@ -751,10 +746,10 @@ Table: list_bucketing_static_part_n2
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 4
+ numFiles 2
numRows 1000
rawDataSize 9624
- totalSize 10786
+ totalSize 10586
#### A masked pattern was here ####
# Storage Information
@@ -765,11 +760,8 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key, value]
Skewed Values: [[103, val_103], [484, val_484], [51, val_14]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_static_part_n2/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_static_part_n2/ds=2008-04-08/hr=11/key=484/value=val_484}
Storage Desc Params:
serialization.format 1
PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
@@ -819,7 +811,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.list_bucketing_static_part_n2
- numFiles 4
+ numFiles 2
numRows 1000
partition_columns ds/hr
partition_columns.types string:string
@@ -827,7 +819,7 @@ STAGE PLANS:
serialization.ddl struct list_bucketing_static_part_n2 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- totalSize 10786
+ totalSize 10586
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
diff --git ql/src/test/results/clientpositive/list_bucket_dml_5.q.out ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
index d71834f94b..685773b3c6 100644
--- ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
+++ ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
@@ -1,14 +1,12 @@
PREHOOK: query: create table list_bucketing_dynamic_part_n1 (key String, value String)
partitioned by (ds String, hr String)
-skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
+skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@list_bucketing_dynamic_part_n1
POSTHOOK: query: create table list_bucketing_dynamic_part_n1 (key String, value String)
partitioned by (ds String, hr String)
-skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
+skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@list_bucketing_dynamic_part_n1
@@ -302,7 +300,7 @@ Table: list_bucketing_dynamic_part_n1
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 3
+ numFiles 1
numRows 500
rawDataSize 5312
totalSize 5812
@@ -316,11 +314,8 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key, value]
Skewed Values: [[103, val_103], [484, val_484], [51, val_14]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part_n1/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part_n1/ds=2008-04-08/hr=11/key=484/value=val_484}
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted list_bucketing_dynamic_part_n1 partition (ds='2008-04-08', hr='12')
@@ -345,7 +340,7 @@ Table: list_bucketing_dynamic_part_n1
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 3
+ numFiles 1
numRows 500
rawDataSize 5312
totalSize 5812
@@ -359,11 +354,8 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key, value]
Skewed Values: [[103, val_103], [484, val_484], [51, val_14]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part_n1/ds=2008-04-08/hr=12/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part_n1/ds=2008-04-08/hr=12/key=484/value=val_484}
Storage Desc Params:
serialization.format 1
PREHOOK: query: select count(1) from srcpart where ds='2008-04-08'
@@ -429,7 +421,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.list_bucketing_dynamic_part_n1
- numFiles 3
+ numFiles 1
numRows 500
partition_columns ds/hr
partition_columns.types string:string
@@ -476,7 +468,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.list_bucketing_dynamic_part_n1
- numFiles 3
+ numFiles 1
numRows 500
partition_columns ds/hr
partition_columns.types string:string
diff --git ql/src/test/results/clientpositive/list_bucket_dml_6.q.out ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
index cd3228e2a8..92fc50ad57 100644
--- ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
+++ ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
@@ -1,7 +1,6 @@
PREHOOK: query: create table list_bucketing_dynamic_part_n3 (key String, value String)
partitioned by (ds String, hr String)
skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
- stored as DIRECTORIES
STORED AS RCFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
@@ -9,7 +8,6 @@ PREHOOK: Output: default@list_bucketing_dynamic_part_n3
POSTHOOK: query: create table list_bucketing_dynamic_part_n3 (key String, value String)
partitioned by (ds String, hr String)
skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
- stored as DIRECTORIES
STORED AS RCFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
@@ -330,7 +328,6 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key, value]
Skewed Values: [[103, val_103], [484, val_484], [51, val_14]]
Storage Desc Params:
@@ -357,10 +354,10 @@ Table: list_bucketing_dynamic_part_n3
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 6
+ numFiles 2
numRows 984
rawDataSize 9488
- totalSize 10734
+ totalSize 10422
#### A masked pattern was here ####
# Storage Information
@@ -371,11 +368,8 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key, value]
Skewed Values: [[103, val_103], [484, val_484], [51, val_14]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part_n3/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part_n3/ds=2008-04-08/hr=b1/key=484/value=val_484}
Storage Desc Params:
serialization.format 1
PREHOOK: query: explain extended
@@ -820,7 +814,6 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key, value]
Skewed Values: [[103, val_103], [484, val_484], [51, val_14]]
Storage Desc Params:
@@ -847,10 +840,10 @@ Table: list_bucketing_dynamic_part_n3
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 4
+ numFiles 2
numRows 984
rawDataSize 9488
- totalSize 10622
+ totalSize 10422
#### A masked pattern was here ####
# Storage Information
@@ -861,11 +854,8 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key, value]
Skewed Values: [[103, val_103], [484, val_484], [51, val_14]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part_n3/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part_n3/ds=2008-04-08/hr=b1/key=484/value=val_484}
Storage Desc Params:
serialization.format 1
PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
@@ -962,7 +952,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.list_bucketing_dynamic_part_n3
- numFiles 4
+ numFiles 2
numRows 984
partition_columns ds/hr
partition_columns.types string:string
@@ -970,7 +960,7 @@ STAGE PLANS:
serialization.ddl struct list_bucketing_dynamic_part_n3 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- totalSize 10622
+ totalSize 10422
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
diff --git ql/src/test/results/clientpositive/list_bucket_dml_7.q.out ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
index a325337ac4..2187b9bd9f 100644
--- ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
+++ ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
@@ -1,7 +1,6 @@
PREHOOK: query: CREATE TABLE list_bucketing_dynamic_part (key String, value STRING)
PARTITIONED BY (ds string, hr string)
skewed by (key) on ('484')
- stored as DIRECTORIES
STORED AS RCFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
@@ -9,7 +8,6 @@ PREHOOK: Output: default@list_bucketing_dynamic_part
POSTHOOK: query: CREATE TABLE list_bucketing_dynamic_part (key String, value STRING)
PARTITIONED BY (ds string, hr string)
skewed by (key) on ('484')
- stored as DIRECTORIES
STORED AS RCFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
@@ -330,7 +328,6 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key]
Skewed Values: [[484]]
Storage Desc Params:
@@ -357,10 +354,10 @@ Table: list_bucketing_dynamic_part
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 4
+ numFiles 2
numRows 984
rawDataSize 9488
- totalSize 10576
+ totalSize 10422
#### A masked pattern was here ####
# Storage Information
@@ -371,11 +368,8 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key]
Skewed Values: [[484]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484}
Storage Desc Params:
serialization.format 1
PREHOOK: query: explain extended
@@ -820,7 +814,6 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key]
Skewed Values: [[484]]
Storage Desc Params:
@@ -847,10 +840,10 @@ Table: list_bucketing_dynamic_part
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 3
+ numFiles 2
numRows 984
rawDataSize 9488
- totalSize 10520
+ totalSize 10422
#### A masked pattern was here ####
# Storage Information
@@ -861,11 +854,8 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key]
Skewed Values: [[484]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484}
Storage Desc Params:
serialization.format 1
PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
@@ -962,7 +952,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.list_bucketing_dynamic_part
- numFiles 3
+ numFiles 2
numRows 984
partition_columns ds/hr
partition_columns.types string:string
@@ -970,7 +960,7 @@ STAGE PLANS:
serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- totalSize 10520
+ totalSize 10422
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
diff --git ql/src/test/results/clientpositive/list_bucket_dml_8.q.out ql/src/test/results/clientpositive/list_bucket_dml_8.q.out
index 3aa3940132..8c27ea0825 100644
--- ql/src/test/results/clientpositive/list_bucket_dml_8.q.out
+++ ql/src/test/results/clientpositive/list_bucket_dml_8.q.out
@@ -1,7 +1,6 @@
PREHOOK: query: create table list_bucketing_dynamic_part_n2 (key String, value String)
partitioned by (ds String, hr String)
skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
- stored as DIRECTORIES
STORED AS RCFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
@@ -9,7 +8,6 @@ PREHOOK: Output: default@list_bucketing_dynamic_part_n2
POSTHOOK: query: create table list_bucketing_dynamic_part_n2 (key String, value String)
partitioned by (ds String, hr String)
skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
- stored as DIRECTORIES
STORED AS RCFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
@@ -330,7 +328,6 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key, value]
Skewed Values: [[103, val_103], [484, val_484], [51, val_14]]
Storage Desc Params:
@@ -357,10 +354,10 @@ Table: list_bucketing_dynamic_part_n2
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 6
+ numFiles 2
numRows 984
rawDataSize 9488
- totalSize 10734
+ totalSize 10422
#### A masked pattern was here ####
# Storage Information
@@ -371,11 +368,8 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key, value]
Skewed Values: [[103, val_103], [484, val_484], [51, val_14]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part_n2/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part_n2/ds=2008-04-08/hr=b1/key=484/value=val_484}
Storage Desc Params:
serialization.format 1
PREHOOK: query: alter table list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', hr='b1') concatenate
@@ -407,10 +401,10 @@ Database: default
Table: list_bucketing_dynamic_part_n2
#### A masked pattern was here ####
Partition Parameters:
- numFiles 3
+ numFiles 1
numRows 984
rawDataSize 9488
- totalSize 10586
+ totalSize 10386
#### A masked pattern was here ####
# Storage Information
@@ -421,11 +415,8 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key, value]
Skewed Values: [[103, val_103], [484, val_484], [51, val_14]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part_n2/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part_n2/ds=2008-04-08/hr=b1/key=484/value=val_484}
Storage Desc Params:
serialization.format 1
PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
@@ -525,7 +516,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.list_bucketing_dynamic_part_n2
- numFiles 3
+ numFiles 1
numRows 984
partition_columns ds/hr
partition_columns.types string:string
@@ -533,7 +524,7 @@ STAGE PLANS:
serialization.ddl struct list_bucketing_dynamic_part_n2 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- totalSize 10586
+ totalSize 10386
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
diff --git ql/src/test/results/clientpositive/list_bucket_dml_9.q.out ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
index 78f1d8ee8f..73fd02c3e3 100644
--- ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
+++ ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
@@ -1,7 +1,6 @@
PREHOOK: query: create table list_bucketing_static_part_n0 (key String, value String)
partitioned by (ds String, hr String)
skewed by (key) on ('484','103')
- stored as DIRECTORIES
STORED AS RCFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
@@ -9,7 +8,6 @@ PREHOOK: Output: default@list_bucketing_static_part_n0
POSTHOOK: query: create table list_bucketing_static_part_n0 (key String, value String)
partitioned by (ds String, hr String)
skewed by (key) on ('484','103')
- stored as DIRECTORIES
STORED AS RCFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
@@ -312,10 +310,10 @@ Table: list_bucketing_static_part_n0
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 6
+ numFiles 2
numRows 1000
rawDataSize 9624
- totalSize 10898
+ totalSize 10586
#### A masked pattern was here ####
# Storage Information
@@ -326,11 +324,8 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key]
Skewed Values: [[103], [484]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[103]=/list_bucketing_static_part_n0/ds=2008-04-08/hr=11/key=103, [484]=/list_bucketing_static_part_n0/ds=2008-04-08/hr=11/key=484}
Storage Desc Params:
serialization.format 1
PREHOOK: query: explain extended
@@ -751,10 +746,10 @@ Table: list_bucketing_static_part_n0
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 4
+ numFiles 2
numRows 1000
rawDataSize 9624
- totalSize 10786
+ totalSize 10586
#### A masked pattern was here ####
# Storage Information
@@ -765,11 +760,8 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key]
Skewed Values: [[103], [484]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[103]=/list_bucketing_static_part_n0/ds=2008-04-08/hr=11/key=103, [484]=/list_bucketing_static_part_n0/ds=2008-04-08/hr=11/key=484}
Storage Desc Params:
serialization.format 1
PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
@@ -819,7 +811,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.list_bucketing_static_part_n0
- numFiles 4
+ numFiles 2
numRows 1000
partition_columns ds/hr
partition_columns.types string:string
@@ -827,7 +819,7 @@ STAGE PLANS:
serialization.ddl struct list_bucketing_static_part_n0 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- totalSize 10786
+ totalSize 10586
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
diff --git ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out
index 40455b771e..4b9715189b 100644
--- ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out
+++ ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out
@@ -1,14 +1,12 @@
PREHOOK: query: create table fact_daily (key String, value String)
partitioned by (ds String, hr String)
-skewed by (key, value) on (('484','val_484'),('238','val_238'))
-stored as DIRECTORIES
+skewed by (key, value) on (('484','val_484'),('238','val_238'))
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@fact_daily
POSTHOOK: query: create table fact_daily (key String, value String)
partitioned by (ds String, hr String)
-skewed by (key, value) on (('484','val_484'),('238','val_238'))
-stored as DIRECTORIES
+skewed by (key, value) on (('484','val_484'),('238','val_238'))
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@fact_daily
@@ -46,7 +44,7 @@ Table: fact_daily
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 3
+ numFiles 1
numRows 500
rawDataSize 5312
totalSize 5812
@@ -60,11 +58,8 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key, value]
Skewed Values: [[238, val_238], [484, val_484]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[238, val_238]=/fact_daily/ds=1/hr=4/key=238/value=val_238, [484, val_484]=/fact_daily/ds=1/hr=4/key=484/value=val_484}
Storage Desc Params:
serialization.format 1
PREHOOK: query: SELECT count(1) FROM fact_daily WHERE ds='1' and hr='4'
@@ -103,7 +98,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.fact_daily
- numFiles 3
+ numFiles 1
numRows 500
partition_columns ds/hr
partition_columns.types string:string
@@ -188,7 +183,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.fact_daily
- numFiles 3
+ numFiles 1
numRows 500
partition_columns ds/hr
partition_columns.types string:string
@@ -274,7 +269,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.fact_daily
- numFiles 3
+ numFiles 1
numRows 500
partition_columns ds/hr
partition_columns.types string:string
@@ -358,7 +353,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.fact_daily
- numFiles 3
+ numFiles 1
numRows 500
partition_columns ds/hr
partition_columns.types string:string
diff --git ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out
index a7cf4e90c4..70990696c2 100644
--- ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out
+++ ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out
@@ -1,14 +1,12 @@
PREHOOK: query: create table fact_daily_n2 (key String, value String)
partitioned by (ds String, hr String)
-skewed by (key, value) on (('484','val_484'),('238','val_238'))
-stored as DIRECTORIES
+skewed by (key, value) on (('484','val_484'),('238','val_238'))
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@fact_daily_n2
POSTHOOK: query: create table fact_daily_n2 (key String, value String)
partitioned by (ds String, hr String)
-skewed by (key, value) on (('484','val_484'),('238','val_238'))
-stored as DIRECTORIES
+skewed by (key, value) on (('484','val_484'),('238','val_238'))
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@fact_daily_n2
@@ -46,7 +44,7 @@ Table: fact_daily_n2
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 3
+ numFiles 1
numRows 500
rawDataSize 5312
totalSize 5812
@@ -60,11 +58,8 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key, value]
Skewed Values: [[238, val_238], [484, val_484]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[238, val_238]=/fact_daily_n2/ds=1/hr=4/key=238/value=val_238, [484, val_484]=/fact_daily_n2/ds=1/hr=4/key=484/value=val_484}
Storage Desc Params:
serialization.format 1
PREHOOK: query: SELECT count(1) FROM fact_daily_n2 WHERE ds='1' and hr='4'
@@ -103,7 +98,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.fact_daily_n2
- numFiles 3
+ numFiles 1
numRows 500
partition_columns ds/hr
partition_columns.types string:string
@@ -188,7 +183,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.fact_daily_n2
- numFiles 3
+ numFiles 1
numRows 500
partition_columns ds/hr
partition_columns.types string:string
@@ -276,7 +271,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.fact_daily_n2
- numFiles 3
+ numFiles 1
numRows 500
partition_columns ds/hr
partition_columns.types string:string
diff --git ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
index 273f2d0b02..e7c669deec 100644
--- ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
+++ ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
@@ -58,11 +58,11 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: alter table fact_daily_n3 skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES
+PREHOOK: query: alter table fact_daily_n3 skewed by (key, value) on (('484','val_484'),('238','val_238'))
PREHOOK: type: ALTERTABLE_SKEWED
PREHOOK: Input: default@fact_daily_n3
PREHOOK: Output: default@fact_daily_n3
-POSTHOOK: query: alter table fact_daily_n3 skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES
+POSTHOOK: query: alter table fact_daily_n3 skewed by (key, value) on (('484','val_484'),('238','val_238'))
POSTHOOK: type: ALTERTABLE_SKEWED
POSTHOOK: Input: default@fact_daily_n3
POSTHOOK: Output: default@fact_daily_n3
@@ -100,7 +100,7 @@ Table: fact_daily_n3
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 3
+ numFiles 1
numRows 500
rawDataSize 5312
totalSize 5812
@@ -114,18 +114,15 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key, value]
Skewed Values: [[238, val_238], [484, val_484]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[238, val_238]=/fact_daily_n3/ds=1/hr=2/key=238/value=val_238, [484, val_484]=/fact_daily_n3/ds=1/hr=2/key=484/value=val_484}
Storage Desc Params:
serialization.format 1
-PREHOOK: query: alter table fact_daily_n3 skewed by (key, value) on (('327','val_327')) stored as DIRECTORIES
+PREHOOK: query: alter table fact_daily_n3 skewed by (key, value) on (('327','val_327'))
PREHOOK: type: ALTERTABLE_SKEWED
PREHOOK: Input: default@fact_daily_n3
PREHOOK: Output: default@fact_daily_n3
-POSTHOOK: query: alter table fact_daily_n3 skewed by (key, value) on (('327','val_327')) stored as DIRECTORIES
+POSTHOOK: query: alter table fact_daily_n3 skewed by (key, value) on (('327','val_327'))
POSTHOOK: type: ALTERTABLE_SKEWED
POSTHOOK: Input: default@fact_daily_n3
POSTHOOK: Output: default@fact_daily_n3
@@ -163,7 +160,7 @@ Table: fact_daily_n3
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 2
+ numFiles 1
numRows 500
rawDataSize 5312
totalSize 5812
@@ -177,11 +174,8 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key, value]
Skewed Values: [[327, val_327]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[327, val_327]=/fact_daily_n3/ds=1/hr=3/key=327/value=val_327}
Storage Desc Params:
serialization.format 1
PREHOOK: query: explain extended
@@ -325,7 +319,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.fact_daily_n3
- numFiles 3
+ numFiles 1
numRows 500
partition_columns ds/hr
partition_columns.types string:string
@@ -412,7 +406,7 @@ STAGE PLANS:
columns.types string:string
#### A masked pattern was here ####
name default.fact_daily_n3
- numFiles 2
+ numFiles 1
numRows 500
partition_columns ds/hr
partition_columns.types string:string
diff --git ql/src/test/results/clientpositive/llap/list_bucket_dml_10.q.out ql/src/test/results/clientpositive/llap/list_bucket_dml_10.q.out
index 5e9d191885..a4a83181b2 100644
--- ql/src/test/results/clientpositive/llap/list_bucket_dml_10.q.out
+++ ql/src/test/results/clientpositive/llap/list_bucket_dml_10.q.out
@@ -1,7 +1,6 @@
PREHOOK: query: create table list_bucketing_static_part (key String, value String)
partitioned by (ds String, hr String)
skewed by (key) on ('484','51','103')
- stored as DIRECTORIES
STORED AS RCFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
@@ -9,7 +8,6 @@ PREHOOK: Output: default@list_bucketing_static_part
POSTHOOK: query: create table list_bucketing_static_part (key String, value String)
partitioned by (ds String, hr String)
skewed by (key) on ('484','51','103')
- stored as DIRECTORIES
STORED AS RCFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
@@ -271,10 +269,10 @@ Table: list_bucketing_static_part
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
- numFiles 4
+ numFiles 1
numRows 500
rawDataSize 4812
- totalSize 5520
+ totalSize 5293
#### A masked pattern was here ####
# Storage Information
@@ -285,10 +283,7 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key]
Skewed Values: [[103], [484], [51]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484, [51]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=51}
Storage Desc Params:
serialization.format 1
diff --git ql/src/test/results/clientpositive/llap/mm_all.q.out ql/src/test/results/clientpositive/llap/mm_all.q.out
index 95734b6b4f..bfb4675d1f 100644
--- ql/src/test/results/clientpositive/llap/mm_all.q.out
+++ ql/src/test/results/clientpositive/llap/mm_all.q.out
@@ -778,12 +778,12 @@ POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@partunion_mm
POSTHOOK: Output: default@partunion_mm
PREHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3))
- stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+tblproperties ("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@skew_mm
POSTHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3))
- stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+tblproperties ("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@skew_mm
@@ -829,12 +829,12 @@ POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@skew_mm
POSTHOOK: Output: default@skew_mm
PREHOOK: query: create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int)
-skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@skew_dp_union_mm
POSTHOOK: query: create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int)
-skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@skew_dp_union_mm
diff --git ql/src/test/results/clientpositive/mm_all.q.out ql/src/test/results/clientpositive/mm_all.q.out
index e7df4c0a29..b38de22528 100644
--- ql/src/test/results/clientpositive/mm_all.q.out
+++ ql/src/test/results/clientpositive/mm_all.q.out
@@ -792,12 +792,12 @@ POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@partunion_mm
POSTHOOK: Output: default@partunion_mm
PREHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3))
- stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+tblproperties ("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@skew_mm
POSTHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3))
- stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+tblproperties ("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@skew_mm
@@ -843,12 +843,12 @@ POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@skew_mm
POSTHOOK: Output: default@skew_mm
PREHOOK: query: create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int)
-skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@skew_dp_union_mm
POSTHOOK: query: create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int)
-skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@skew_dp_union_mm
diff --git ql/src/test/results/clientpositive/show_create_table_db_table.q.out ql/src/test/results/clientpositive/show_create_table_db_table.q.out
index 33410c8582..4bda1d3098 100644
--- ql/src/test/results/clientpositive/show_create_table_db_table.q.out
+++ ql/src/test/results/clientpositive/show_create_table_db_table.q.out
@@ -26,11 +26,11 @@ POSTHOOK: query: CREATE TABLE tmp_feng.tmp_showcrt2(key string, value int) skewe
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:tmp_feng
POSTHOOK: Output: tmp_feng@tmp_showcrt2
-PREHOOK: query: CREATE TABLE tmp_feng.tmp_showcrt3(key string, value int) skewed by (key) on ('1','2') stored as directories
+PREHOOK: query: CREATE TABLE tmp_feng.tmp_showcrt3(key string, value int) skewed by (key) on ('1','2')
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:tmp_feng
PREHOOK: Output: tmp_feng@tmp_showcrt3
-POSTHOOK: query: CREATE TABLE tmp_feng.tmp_showcrt3(key string, value int) skewed by (key) on ('1','2') stored as directories
+POSTHOOK: query: CREATE TABLE tmp_feng.tmp_showcrt3(key string, value int) skewed by (key) on ('1','2')
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:tmp_feng
POSTHOOK: Output: tmp_feng@tmp_showcrt3
@@ -93,7 +93,6 @@ CREATE TABLE `tmp_feng.tmp_showcrt3`(
`value` int)
SKEWED BY (key)
ON (('1'),('2'))
- STORED AS DIRECTORIES
ROW FORMAT SERDE
'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
STORED AS INPUTFORMAT
diff --git ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out
index e14ae92d1c..d6212564bc 100644
--- ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out
+++ ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out
@@ -1,7 +1,6 @@
PREHOOK: query: create table list_bucketing_static_part (key String, value String)
partitioned by (ds String, hr String)
skewed by (key) on ('484','51','103')
- stored as DIRECTORIES
STORED AS RCFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
@@ -9,7 +8,6 @@ PREHOOK: Output: default@list_bucketing_static_part
POSTHOOK: query: create table list_bucketing_static_part (key String, value String)
partitioned by (ds String, hr String)
skewed by (key) on ('484','51','103')
- stored as DIRECTORIES
STORED AS RCFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
@@ -207,10 +205,10 @@ Table: list_bucketing_static_part
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
- numFiles 4
+ numFiles 1
numRows 500
rawDataSize 4812
- totalSize 5520
+ totalSize 5293
#### A masked pattern was here ####
# Storage Information
@@ -221,10 +219,7 @@ Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
-Stored As SubDirectories: Yes
Skewed Columns: [key]
Skewed Values: [[103], [484], [51]]
-Skewed Value to Path: {[103]=hdfs://### HDFS PATH ###, [484]=hdfs://### HDFS PATH ###, [51]=hdfs://### HDFS PATH ###}
-Skewed Value to Truncated Path: {[103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484, [51]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=51}
Storage Desc Params:
serialization.format 1
diff --git ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out
index 1dd4884cda..c6ed471e0e 100644
--- ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out
+++ ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out
@@ -1,7 +1,6 @@
PREHOOK: query: create table list_bucketing_static_part_n4 (key String, value String)
partitioned by (ds String, hr String)
skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
- stored as DIRECTORIES
STORED AS RCFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
@@ -9,7 +8,6 @@ PREHOOK: Output: default@list_bucketing_static_part_n4
POSTHOOK: query: create table list_bucketing_static_part_n4 (key String, value String)
partitioned by (ds String, hr String)
skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
- stored as DIRECTORIES
STORED AS RCFILE
POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -258,10 +256,10 @@ Table: list_bucketing_static_part_n4 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} - numFiles 6 + numFiles 2 numRows 1000 rawDataSize 9624 - totalSize 10898 + totalSize 10586 #### A masked pattern was here #### # Storage Information @@ -272,11 +270,8 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] -Stored As SubDirectories: Yes Skewed Columns: [key, value] Skewed Values: [[103, val_103], [484, val_484], [51, val_14]] -#### A masked pattern was here #### -Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_static_part_n4/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_static_part_n4/ds=2008-04-08/hr=11/key=484/value=val_484} Storage Desc Params: serialization.format 1 PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' @@ -326,7 +321,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.list_bucketing_static_part_n4 - numFiles 6 + numFiles 2 numRows 1000 partition_columns ds/hr partition_columns.types string:string @@ -334,7 +329,7 @@ STAGE PLANS: serialization.ddl struct list_bucketing_static_part_n4 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - totalSize 10898 + totalSize 10586 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe diff --git ql/src/test/results/clientpositive/stats_list_bucket.q.out ql/src/test/results/clientpositive/stats_list_bucket.q.out index 566af31192..946859bb68 100644 --- ql/src/test/results/clientpositive/stats_list_bucket.q.out +++ ql/src/test/results/clientpositive/stats_list_bucket.q.out @@ -11,7 +11,6 @@ PREHOOK: query: create table stats_list_bucket ( c2 string ) partitioned by (ds string, hr string) skewed by (c1, c2) on (('466','val_466'),('287','val_287'),('82','val_82')) -stored as directories stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -21,7 +20,6 @@ POSTHOOK: query: create table stats_list_bucket ( c2 string ) partitioned by (ds string, hr string) skewed by (c1, c2) on (('466','val_466'),('287','val_287'),('82','val_82')) -stored as directories stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -60,10 +58,10 @@ Table: stats_list_bucket #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"c1\":\"true\",\"c2\":\"true\"}} - numFiles 4 + numFiles 1 numRows 500 rawDataSize 4812 - totalSize 5522 + totalSize 5293 #### A masked pattern was here #### # Storage Information @@ -74,11 +72,8 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] -Stored As SubDirectories: Yes Skewed Columns: [c1, c2] Skewed Values: [[287, val_287], [466, val_466], [82, val_82]] -#### A masked pattern was here #### -Skewed Value to Truncated Path: {[287, val_287]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=287/c2=val_287, [466, val_466]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=466/c2=val_466, [82, val_82]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=82/c2=val_82} Storage Desc Params: serialization.format 1 PREHOOK: query: create table stats_list_bucket_1 ( @@ -86,7 +81,6 @@ PREHOOK: query: create table stats_list_bucket_1 ( c2 string ) skewed by (c1, c2) on (('466','val_466'),('287','val_287'),('82','val_82')) -stored as directories stored as rcfile 
PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -96,7 +90,6 @@ POSTHOOK: query: create table stats_list_bucket_1 ( c2 string ) skewed by (c1, c2) on (('466','val_466'),('287','val_287'),('82','val_82')) -stored as directories stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -132,10 +125,10 @@ Table Type: MANAGED_TABLE Table Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"c1\":\"true\",\"c2\":\"true\"}} bucketing_version 2 - numFiles 4 + numFiles 1 numRows 500 rawDataSize 4812 - totalSize 5522 + totalSize 5293 #### A masked pattern was here #### # Storage Information @@ -146,11 +139,8 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] -Stored As SubDirectories: Yes Skewed Columns: [c1, c2] Skewed Values: [[287, val_287], [466, val_466], [82, val_82]] -#### A masked pattern was here #### -Skewed Value to Truncated Path: {[287, val_287]=/stats_list_bucket_1/c1=287/c2=val_287, [466, val_466]=/stats_list_bucket_1/c1=466/c2=val_466, [82, val_82]=/stats_list_bucket_1/c1=82/c2=val_82} Storage Desc Params: serialization.format 1 PREHOOK: query: drop table stats_list_bucket diff --git ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out index bc355544a3..a8986406f2 100644 --- ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out +++ ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out @@ -8,13 +8,11 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@test_tab_n3 PREHOOK: query: ALTER TABLE test_tab_n3 SKEWED BY (key) ON ("484") -STORED AS DIRECTORIES PREHOOK: type: ALTERTABLE_SKEWED PREHOOK: Input: default@test_tab_n3 PREHOOK: Output: default@test_tab_n3 POSTHOOK: query: ALTER TABLE test_tab_n3 SKEWED BY (key) ON ("484") -STORED AS DIRECTORIES POSTHOOK: type: ALTERTABLE_SKEWED POSTHOOK: Input: default@test_tab_n3 POSTHOOK: Output: default@test_tab_n3 @@ -74,7 +72,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.test_tab_n3 - numFiles 2 + numFiles 1 numRows 500 partition_columns part partition_columns.types string @@ -82,7 +80,7 @@ STAGE PLANS: serialization.ddl struct test_tab_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - totalSize 1761 + totalSize 1685 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe @@ -157,7 +155,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.test_tab_n3 - numFiles 2 + numFiles 1 numRows 500 partition_columns part partition_columns.types string @@ -165,7 +163,7 @@ STAGE PLANS: serialization.ddl struct test_tab_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - totalSize 1761 + totalSize 1685 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index af975fce1b..0137f61288 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -5882,11 +5882,6 @@ void StorageDescriptor::__set_skewedInfo(const SkewedInfo& val) { __isset.skewedInfo = true; } -void 
StorageDescriptor::__set_storedAsSubDirectories(const bool val) { - this->storedAsSubDirectories = val; -__isset.storedAsSubDirectories = true; -} - uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -6047,14 +6042,6 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; - case 12: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->storedAsSubDirectories); - this->__isset.storedAsSubDirectories = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -6150,11 +6137,6 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += this->skewedInfo.write(oprot); xfer += oprot->writeFieldEnd(); } - if (this->__isset.storedAsSubDirectories) { - xfer += oprot->writeFieldBegin("storedAsSubDirectories", ::apache::thrift::protocol::T_BOOL, 12); - xfer += oprot->writeBool(this->storedAsSubDirectories); - xfer += oprot->writeFieldEnd(); - } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -6173,7 +6155,6 @@ void swap(StorageDescriptor &a, StorageDescriptor &b) { swap(a.sortCols, b.sortCols); swap(a.parameters, b.parameters); swap(a.skewedInfo, b.skewedInfo); - swap(a.storedAsSubDirectories, b.storedAsSubDirectories); swap(a.__isset, b.__isset); } @@ -6189,7 +6170,6 @@ StorageDescriptor::StorageDescriptor(const StorageDescriptor& other232) { sortCols = other232.sortCols; parameters = other232.parameters; skewedInfo = other232.skewedInfo; - storedAsSubDirectories = other232.storedAsSubDirectories; __isset = other232.__isset; } StorageDescriptor& StorageDescriptor::operator=(const StorageDescriptor& other233) { @@ -6204,7 +6184,6 @@ StorageDescriptor& StorageDescriptor::operator=(const StorageDescriptor& other23 sortCols = other233.sortCols; parameters = other233.parameters; skewedInfo = other233.skewedInfo; - storedAsSubDirectories = other233.storedAsSubDirectories; __isset = other233.__isset; return *this; } @@ -6222,7 +6201,6 @@ void StorageDescriptor::printTo(std::ostream& out) const { out << ", " << "sortCols=" << to_string(sortCols); out << ", " << "parameters=" << to_string(parameters); out << ", " << "skewedInfo="; (__isset.skewedInfo ? (out << to_string(skewedInfo)) : (out << "")); - out << ", " << "storedAsSubDirectories="; (__isset.storedAsSubDirectories ? 
(out << to_string(storedAsSubDirectories)) : (out << "")); out << ")"; } diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 7b42182d60..adbda4535a 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -2925,7 +2925,7 @@ inline std::ostream& operator<<(std::ostream& out, const SkewedInfo& obj) } typedef struct _StorageDescriptor__isset { - _StorageDescriptor__isset() : cols(false), location(false), inputFormat(false), outputFormat(false), compressed(false), numBuckets(false), serdeInfo(false), bucketCols(false), sortCols(false), parameters(false), skewedInfo(false), storedAsSubDirectories(false) {} + _StorageDescriptor__isset() : cols(false), location(false), inputFormat(false), outputFormat(false), compressed(false), numBuckets(false), serdeInfo(false), bucketCols(false), sortCols(false), parameters(false), skewedInfo(false) {} bool cols :1; bool location :1; bool inputFormat :1; @@ -2937,7 +2937,6 @@ typedef struct _StorageDescriptor__isset { bool sortCols :1; bool parameters :1; bool skewedInfo :1; - bool storedAsSubDirectories :1; } _StorageDescriptor__isset; class StorageDescriptor { @@ -2945,7 +2944,7 @@ class StorageDescriptor { StorageDescriptor(const StorageDescriptor&); StorageDescriptor& operator=(const StorageDescriptor&); - StorageDescriptor() : location(), inputFormat(), outputFormat(), compressed(0), numBuckets(0), storedAsSubDirectories(0) { + StorageDescriptor() : location(), inputFormat(), outputFormat(), compressed(0), numBuckets(0) { } virtual ~StorageDescriptor() throw(); @@ -2960,7 +2959,6 @@ class StorageDescriptor { std::vector sortCols; std::map parameters; SkewedInfo skewedInfo; - bool storedAsSubDirectories; _StorageDescriptor__isset __isset; @@ -2986,8 +2984,6 @@ class StorageDescriptor { void __set_skewedInfo(const SkewedInfo& val); - void __set_storedAsSubDirectories(const bool val); - bool operator == (const StorageDescriptor & rhs) const { if (!(cols == rhs.cols)) @@ -3014,10 +3010,6 @@ class StorageDescriptor { return false; else if (__isset.skewedInfo && !(skewedInfo == rhs.skewedInfo)) return false; - if (__isset.storedAsSubDirectories != rhs.__isset.storedAsSubDirectories) - return false; - else if (__isset.storedAsSubDirectories && !(storedAsSubDirectories == rhs.storedAsSubDirectories)) - return false; return true; } bool operator != (const StorageDescriptor &rhs) const { diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java index 3cfa765fc1..1c7c505b31 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java @@ -49,7 +49,6 @@ private static final org.apache.thrift.protocol.TField SORT_COLS_FIELD_DESC = new org.apache.thrift.protocol.TField("sortCols", org.apache.thrift.protocol.TType.LIST, (short)9); private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)10); private static final org.apache.thrift.protocol.TField SKEWED_INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("skewedInfo", 
org.apache.thrift.protocol.TType.STRUCT, (short)11); - private static final org.apache.thrift.protocol.TField STORED_AS_SUB_DIRECTORIES_FIELD_DESC = new org.apache.thrift.protocol.TField("storedAsSubDirectories", org.apache.thrift.protocol.TType.BOOL, (short)12); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -68,7 +67,6 @@ private List sortCols; // required private Map parameters; // required private SkewedInfo skewedInfo; // optional - private boolean storedAsSubDirectories; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -82,8 +80,7 @@ BUCKET_COLS((short)8, "bucketCols"), SORT_COLS((short)9, "sortCols"), PARAMETERS((short)10, "parameters"), - SKEWED_INFO((short)11, "skewedInfo"), - STORED_AS_SUB_DIRECTORIES((short)12, "storedAsSubDirectories"); + SKEWED_INFO((short)11, "skewedInfo"); private static final Map byName = new HashMap(); @@ -120,8 +117,6 @@ public static _Fields findByThriftId(int fieldId) { return PARAMETERS; case 11: // SKEWED_INFO return SKEWED_INFO; - case 12: // STORED_AS_SUB_DIRECTORIES - return STORED_AS_SUB_DIRECTORIES; default: return null; } @@ -164,9 +159,8 @@ public String getFieldName() { // isset id assignments private static final int __COMPRESSED_ISSET_ID = 0; private static final int __NUMBUCKETS_ISSET_ID = 1; - private static final int __STOREDASSUBDIRECTORIES_ISSET_ID = 2; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.SKEWED_INFO,_Fields.STORED_AS_SUB_DIRECTORIES}; + private static final _Fields optionals[] = {_Fields.SKEWED_INFO}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -197,8 +191,6 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.SKEWED_INFO, new org.apache.thrift.meta_data.FieldMetaData("skewedInfo", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SkewedInfo.class))); - tmpMap.put(_Fields.STORED_AS_SUB_DIRECTORIES, new org.apache.thrift.meta_data.FieldMetaData("storedAsSubDirectories", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(StorageDescriptor.class, metaDataMap); } @@ -277,7 +269,6 @@ public StorageDescriptor(StorageDescriptor other) { if (other.isSetSkewedInfo()) { this.skewedInfo = new SkewedInfo(other.skewedInfo); } - this.storedAsSubDirectories = other.storedAsSubDirectories; } public StorageDescriptor deepCopy() { @@ -299,8 +290,6 @@ public void clear() { this.sortCols = null; this.parameters = null; this.skewedInfo = null; - setStoredAsSubDirectoriesIsSet(false); - this.storedAsSubDirectories = false; } public int getColsSize() { @@ -610,28 +599,6 @@ public void setSkewedInfoIsSet(boolean value) { } } - public boolean isStoredAsSubDirectories() { - return this.storedAsSubDirectories; - } - - public void setStoredAsSubDirectories(boolean storedAsSubDirectories) { - this.storedAsSubDirectories = 
storedAsSubDirectories; - setStoredAsSubDirectoriesIsSet(true); - } - - public void unsetStoredAsSubDirectories() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __STOREDASSUBDIRECTORIES_ISSET_ID); - } - - /** Returns true if field storedAsSubDirectories is set (has been assigned a value) and false otherwise */ - public boolean isSetStoredAsSubDirectories() { - return EncodingUtils.testBit(__isset_bitfield, __STOREDASSUBDIRECTORIES_ISSET_ID); - } - - public void setStoredAsSubDirectoriesIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __STOREDASSUBDIRECTORIES_ISSET_ID, value); - } - public void setFieldValue(_Fields field, Object value) { switch (field) { case COLS: @@ -722,14 +689,6 @@ public void setFieldValue(_Fields field, Object value) { } break; - case STORED_AS_SUB_DIRECTORIES: - if (value == null) { - unsetStoredAsSubDirectories(); - } else { - setStoredAsSubDirectories((Boolean)value); - } - break; - } } @@ -768,9 +727,6 @@ public Object getFieldValue(_Fields field) { case SKEWED_INFO: return getSkewedInfo(); - case STORED_AS_SUB_DIRECTORIES: - return isStoredAsSubDirectories(); - } throw new IllegalStateException(); } @@ -804,8 +760,6 @@ public boolean isSet(_Fields field) { return isSetParameters(); case SKEWED_INFO: return isSetSkewedInfo(); - case STORED_AS_SUB_DIRECTORIES: - return isSetStoredAsSubDirectories(); } throw new IllegalStateException(); } @@ -922,15 +876,6 @@ public boolean equals(StorageDescriptor that) { return false; } - boolean this_present_storedAsSubDirectories = true && this.isSetStoredAsSubDirectories(); - boolean that_present_storedAsSubDirectories = true && that.isSetStoredAsSubDirectories(); - if (this_present_storedAsSubDirectories || that_present_storedAsSubDirectories) { - if (!(this_present_storedAsSubDirectories && that_present_storedAsSubDirectories)) - return false; - if (this.storedAsSubDirectories != that.storedAsSubDirectories) - return false; - } - return true; } @@ -993,11 +938,6 @@ public int hashCode() { if (present_skewedInfo) list.add(skewedInfo); - boolean present_storedAsSubDirectories = true && (isSetStoredAsSubDirectories()); - list.add(present_storedAsSubDirectories); - if (present_storedAsSubDirectories) - list.add(storedAsSubDirectories); - return list.hashCode(); } @@ -1119,16 +1059,6 @@ public int compareTo(StorageDescriptor other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetStoredAsSubDirectories()).compareTo(other.isSetStoredAsSubDirectories()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStoredAsSubDirectories()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.storedAsSubDirectories, other.storedAsSubDirectories); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -1230,12 +1160,6 @@ public String toString() { } first = false; } - if (isSetStoredAsSubDirectories()) { - if (!first) sb.append(", "); - sb.append("storedAsSubDirectories:"); - sb.append(this.storedAsSubDirectories); - first = false; - } sb.append(")"); return sb.toString(); } @@ -1421,14 +1345,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, StorageDescriptor s org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 12: // STORED_AS_SUB_DIRECTORIES - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.storedAsSubDirectories = iprot.readBool(); - struct.setStoredAsSubDirectoriesIsSet(true); - } else { - 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1524,11 +1440,6 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, StorageDescriptor oprot.writeFieldEnd(); } } - if (struct.isSetStoredAsSubDirectories()) { - oprot.writeFieldBegin(STORED_AS_SUB_DIRECTORIES_FIELD_DESC); - oprot.writeBool(struct.storedAsSubDirectories); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1580,10 +1491,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, StorageDescriptor s if (struct.isSetSkewedInfo()) { optionals.set(10); } - if (struct.isSetStoredAsSubDirectories()) { - optionals.set(11); - } - oprot.writeBitSet(optionals, 12); + oprot.writeBitSet(optionals, 11); if (struct.isSetCols()) { { oprot.writeI32(struct.cols.size()); @@ -1642,15 +1550,12 @@ public void write(org.apache.thrift.protocol.TProtocol prot, StorageDescriptor s if (struct.isSetSkewedInfo()) { struct.skewedInfo.write(oprot); } - if (struct.isSetStoredAsSubDirectories()) { - oprot.writeBool(struct.storedAsSubDirectories); - } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, StorageDescriptor struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(12); + BitSet incoming = iprot.readBitSet(11); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list185 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); @@ -1737,10 +1642,6 @@ public void read(org.apache.thrift.protocol.TProtocol prot, StorageDescriptor st struct.skewedInfo.read(iprot); struct.setSkewedInfoIsSet(true); } - if (incoming.get(11)) { - struct.storedAsSubDirectories = iprot.readBool(); - struct.setStoredAsSubDirectoriesIsSet(true); - } } } diff --git standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php index 353c0deb91..4f28db2549 100644 --- standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php +++ standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -5909,10 +5909,6 @@ class StorageDescriptor { * @var \metastore\SkewedInfo */ public $skewedInfo = null; - /** - * @var bool - */ - public $storedAsSubDirectories = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -5985,10 +5981,6 @@ class StorageDescriptor { 'type' => TType::STRUCT, 'class' => '\metastore\SkewedInfo', ), - 12 => array( - 'var' => 'storedAsSubDirectories', - 'type' => TType::BOOL, - ), ); } if (is_array($vals)) { @@ -6025,9 +6017,6 @@ class StorageDescriptor { if (isset($vals['skewedInfo'])) { $this->skewedInfo = $vals['skewedInfo']; } - if (isset($vals['storedAsSubDirectories'])) { - $this->storedAsSubDirectories = $vals['storedAsSubDirectories']; - } } } @@ -6174,13 +6163,6 @@ class StorageDescriptor { $xfer += $input->skip($ftype); } break; - case 12: - if ($ftype == TType::BOOL) { - $xfer += $input->readBool($this->storedAsSubDirectories); - } else { - $xfer += $input->skip($ftype); - } - break; default: $xfer += $input->skip($ftype); break; @@ -6304,11 +6286,6 @@ class StorageDescriptor { $xfer += $this->skewedInfo->write($output); $xfer += $output->writeFieldEnd(); } - if ($this->storedAsSubDirectories !== null) { - $xfer += $output->writeFieldBegin('storedAsSubDirectories', TType::BOOL, 12); - $xfer += $output->writeBool($this->storedAsSubDirectories); - $xfer 
+= $output->writeFieldEnd(); - } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index fdec32e10c..6d5af61278 100644 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -4205,7 +4205,6 @@ class StorageDescriptor: - sortCols - parameters - skewedInfo - - storedAsSubDirectories """ thrift_spec = ( @@ -4221,10 +4220,9 @@ class StorageDescriptor: (9, TType.LIST, 'sortCols', (TType.STRUCT,(Order, Order.thrift_spec)), None, ), # 9 (10, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 10 (11, TType.STRUCT, 'skewedInfo', (SkewedInfo, SkewedInfo.thrift_spec), None, ), # 11 - (12, TType.BOOL, 'storedAsSubDirectories', None, None, ), # 12 ) - def __init__(self, cols=None, location=None, inputFormat=None, outputFormat=None, compressed=None, numBuckets=None, serdeInfo=None, bucketCols=None, sortCols=None, parameters=None, skewedInfo=None, storedAsSubDirectories=None,): + def __init__(self, cols=None, location=None, inputFormat=None, outputFormat=None, compressed=None, numBuckets=None, serdeInfo=None, bucketCols=None, sortCols=None, parameters=None, skewedInfo=None,): self.cols = cols self.location = location self.inputFormat = inputFormat @@ -4236,7 +4234,6 @@ def __init__(self, cols=None, location=None, inputFormat=None, outputFormat=None self.sortCols = sortCols self.parameters = parameters self.skewedInfo = skewedInfo - self.storedAsSubDirectories = storedAsSubDirectories def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -4327,11 +4324,6 @@ def read(self, iprot): self.skewedInfo.read(iprot) else: iprot.skip(ftype) - elif fid == 12: - if ftype == TType.BOOL: - self.storedAsSubDirectories = iprot.readBool() - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -4399,10 +4391,6 @@ def write(self, oprot): oprot.writeFieldBegin('skewedInfo', TType.STRUCT, 11) self.skewedInfo.write(oprot) oprot.writeFieldEnd() - if self.storedAsSubDirectories is not None: - oprot.writeFieldBegin('storedAsSubDirectories', TType.BOOL, 12) - oprot.writeBool(self.storedAsSubDirectories) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -4423,7 +4411,6 @@ def __hash__(self): value = (value * 31) ^ hash(self.sortCols) value = (value * 31) ^ hash(self.parameters) value = (value * 31) ^ hash(self.skewedInfo) - value = (value * 31) ^ hash(self.storedAsSubDirectories) return value def __repr__(self): diff --git standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index fb73b28f62..ed25419aab 100644 --- standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -997,7 +997,6 @@ class StorageDescriptor SORTCOLS = 9 PARAMETERS = 10 SKEWEDINFO = 11 - STOREDASSUBDIRECTORIES = 12 FIELDS = { COLS => {:type => ::Thrift::Types::LIST, :name => 'cols', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}}, @@ -1010,8 +1009,7 @@ class StorageDescriptor BUCKETCOLS => {:type => ::Thrift::Types::LIST, :name => 'bucketCols', :element => {:type => 
::Thrift::Types::STRING}}, SORTCOLS => {:type => ::Thrift::Types::LIST, :name => 'sortCols', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Order}}, PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}}, - SKEWEDINFO => {:type => ::Thrift::Types::STRUCT, :name => 'skewedInfo', :class => ::SkewedInfo, :optional => true}, - STOREDASSUBDIRECTORIES => {:type => ::Thrift::Types::BOOL, :name => 'storedAsSubDirectories', :optional => true} + SKEWEDINFO => {:type => ::Thrift::Types::STRUCT, :name => 'skewedInfo', :class => ::SkewedInfo, :optional => true} } def struct_fields; FIELDS; end diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 5bb1985025..193417f1b2 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -640,7 +640,7 @@ private boolean isViewTable(String catName, String dbName, String tblName) throw "select " + PARTITIONS + ".\"PART_ID\", " + SDS + ".\"SD_ID\", " + SDS + ".\"CD_ID\"," + " " + SERDES + ".\"SERDE_ID\", " + PARTITIONS + ".\"CREATE_TIME\"," + " " + PARTITIONS + ".\"LAST_ACCESS_TIME\", " + SDS + ".\"INPUT_FORMAT\", " + SDS + ".\"IS_COMPRESSED\"," - + " " + SDS + ".\"IS_STOREDASSUBDIRECTORIES\", " + SDS + ".\"LOCATION\", " + SDS + ".\"NUM_BUCKETS\"," + + " " + SDS + ".\"LOCATION\", " + SDS + ".\"NUM_BUCKETS\"," + " " + SDS + ".\"OUTPUT_FORMAT\", " + SERDES + ".\"NAME\", " + SERDES + ".\"SLIB\" " + "from " + PARTITIONS + "" + " left outer join " + SDS + " on " + PARTITIONS + ".\"SD_ID\" = " + SDS + ".\"SD_ID\" " @@ -713,11 +713,9 @@ private boolean isViewTable(String catName, String dbName, String tblName) throw sd.setInputFormat((String)fields[6]); Boolean tmpBoolean = extractSqlBoolean(fields[7]); if (tmpBoolean != null) sd.setCompressed(tmpBoolean); - tmpBoolean = extractSqlBoolean(fields[8]); - if (tmpBoolean != null) sd.setStoredAsSubDirectories(tmpBoolean); - sd.setLocation((String)fields[9]); - if (fields[10] != null) sd.setNumBuckets(extractSqlInt(fields[10])); - sd.setOutputFormat((String)fields[11]); + sd.setLocation((String)fields[8]); + if (fields[9] != null) sd.setNumBuckets(extractSqlInt(fields[9])); + sd.setOutputFormat((String)fields[10]); sdSb.append(sdId).append(","); part.setSd(sd); @@ -739,8 +737,8 @@ private boolean isViewTable(String catName, String dbName, String tblName) throw throw new MetaException("SDs reuse serdes; we don't expect that"); } serde.setParameters(new HashMap<String, String>()); - serde.setName((String)fields[12]); - serde.setSerializationLib((String)fields[13]); + serde.setName((String)fields[11]); + serde.setSerializationLib((String)fields[12]); serdeSb.append(serdeId).append(","); sd.setSerdeInfo(serde); Deadline.checkTimeout(); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 13ccdb145e..a4c38e7551 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -2069,7 +2069,6 @@ private StorageDescriptor convertToStorageDescriptor( convertToSkewedValues(msd.getSkewedColValues()),
covertToSkewedMap(msd.getSkewedColValueLocationMaps())); sd.setSkewedInfo(skewedInfo); - sd.setStoredAsSubDirectories(msd.isStoredAsSubDirectories()); return sd; } @@ -2180,7 +2179,7 @@ private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd, convertToMStringLists((null == sd.getSkewedInfo()) ? null : sd.getSkewedInfo() .getSkewedColValues()), covertToMapMStringList((null == sd.getSkewedInfo()) ? null : sd.getSkewedInfo() - .getSkewedColValueLocationMaps()), sd.isStoredAsSubDirectories()); + .getSkewedColValueLocationMaps())); } private MCreationMetadata convertToMCreationMetadata( @@ -4148,7 +4147,6 @@ private void copyMSD(MStorageDescriptor newSd, MStorageDescriptor oldSd) { oldSd.setSkewedColValueLocationMaps(newSd.getSkewedColValueLocationMaps()); oldSd.setSortCols(newSd.getSortCols()); oldSd.setParameters(newSd.getParameters()); - oldSd.setStoredAsSubDirectories(newSd.isStoredAsSubDirectories()); } /** diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java index 433e7c7c18..7eb4f0e565 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java @@ -41,7 +41,7 @@ private String location, inputFormat, outputFormat; private int numBuckets; private Map<String, String> storageDescriptorParams; - private boolean compressed, storedAsSubDirectories; + private boolean compressed; private List<String> bucketCols, skewedColNames; private List<Order> sortCols; private List<List<String>> skewedColValues; @@ -64,7 +64,6 @@ protected StorageDescriptorBuilder() { protected StorageDescriptor buildSd() throws MetaException { StorageDescriptor sd = new StorageDescriptor(getCols(), location, inputFormat, outputFormat, compressed, numBuckets, buildSerde(), bucketCols, sortCols, storageDescriptorParams); - sd.setStoredAsSubDirectories(storedAsSubDirectories); if (skewedColNames != null) { SkewedInfo skewed = new SkewedInfo(skewedColNames, skewedColValues, skewedColValueLocationMaps); @@ -110,11 +109,6 @@ public T setCompressed(boolean compressed) { return child; } - public T setStoredAsSubDirectories(boolean storedAsSubDirectories) { - this.storedAsSubDirectories = storedAsSubDirectories; - return child; - } - public T setBucketCols(List<String> bucketCols) { this.bucketCols = bucketCols; return child; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java index 4c6ce008f8..91e9dcf856 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java @@ -35,7 +35,6 @@ private List<String> skewedColNames; private List<MStringList> skewedColValues; private Map<MStringList, String> skewedColValueLocationMaps; - private boolean isStoredAsSubDirectories; public MStorageDescriptor() {} @@ -56,7 +55,7 @@ public MStorageDescriptor(MColumnDescriptor cd, String location, String inputFor String outputFormat, boolean isCompressed, int numBuckets, MSerDeInfo serDeInfo, List<String> bucketCols, List<MOrder> sortOrder, Map<String, String> parameters, List<String> skewedColNames, List<MStringList> skewedColValues, - Map<MStringList, String> skewedColValueLocationMaps, boolean storedAsSubDirectories) { + Map<MStringList, String>
skewedColValueLocationMaps) { this.cd = cd; this.location = location; this.inputFormat = inputFormat; @@ -70,7 +69,6 @@ public MStorageDescriptor(MColumnDescriptor cd, String location, String inputFor this.skewedColNames = skewedColNames; this.skewedColValues = skewedColValues; this.skewedColValueLocationMaps = skewedColValueLocationMaps; - this.isStoredAsSubDirectories = storedAsSubDirectories; } @@ -257,21 +255,4 @@ public void setSkewedColValues(List<MStringList> skewedColValues) { public void setSkewedColValueLocationMaps(Map<MStringList, String> listBucketColValuesMapping) { this.skewedColValueLocationMaps = listBucketColValuesMapping; } - - - /** - * @return the storedAsSubDirectories - */ - public boolean isStoredAsSubDirectories() { - return isStoredAsSubDirectories; - } - - - /** - * @param storedAsSubDirectories the storedAsSubDirectories to set - */ - public void setStoredAsSubDirectories(boolean storedAsSubDirectories) { - this.isStoredAsSubDirectories = storedAsSubDirectories; - } - } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index 742b6bf76b..a9473347a6 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -442,7 +442,7 @@ private static String getPartitionValWithInvalidCharacter(List<String> partVals, md.update(e.getValue().getBytes(ENCODING)); } } - md.update(sd.isStoredAsSubDirectories() ? "true".getBytes(ENCODING) : "false".getBytes(ENCODING)); + md.update("false".getBytes(ENCODING)); } return md.digest(); diff --git standalone-metastore/src/main/resources/package.jdo standalone-metastore/src/main/resources/package.jdo index 2d2cb19991..122addf4b7 100644 --- standalone-metastore/src/main/resources/package.jdo +++ standalone-metastore/src/main/resources/package.jdo @@ -381,9 +381,6 @@ - <field name="isStoredAsSubDirectories"> - <column name="IS_STOREDASSUBDIRECTORIES"/> - </field> diff --git standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql new file mode 100644 index 0000000000..5bfb929e77 --- /dev/null +++ standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql @@ -0,0 +1,693 @@ +-- Timestamp: 2011-09-22 15:32:02.024 +-- Source database is: /home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb +-- Connection URL is: jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb +-- Specified schema is: APP +-- appendLogs: false + +-- ---------------------------------------------- +-- DDL Statements for functions +-- ---------------------------------------------- + +CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ; + +CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ; + +-- ---------------------------------------------- +-- DDL Statements for tables +-- ---------------------------------------------- +CREATE TABLE "APP"."DBS" ( + "DB_ID" BIGINT NOT NULL, + "DESC" VARCHAR(4000), + "DB_LOCATION_URI" VARCHAR(4000) NOT NULL, + "NAME" VARCHAR(128), + "OWNER_NAME" VARCHAR(128), + "OWNER_TYPE" VARCHAR(10), + "CTLG_NAME" VARCHAR(256) NOT
NULL +); + +CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT); + +CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); + +CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT); + +CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB); + +CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(767) NOT NULL, "TYPE_NAME" CLOB, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL); + +CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" BIGINT NOT NULL, "PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128)); + +CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DEFERRED_REBUILD" CHAR(1) NOT NULL, "INDEX_HANDLER_CLASS" VARCHAR(4000), "INDEX_NAME" VARCHAR(128), "INDEX_TBL_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "ORIG_TBL_ID" BIGINT, "SD_ID" BIGINT); + +CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); + +CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT); + +CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER); + +CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128)); + +CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "ROLE_ID" BIGINT); + +CREATE TABLE "APP"."TYPES" ("TYPES_ID" BIGINT NOT NULL, "TYPE_NAME" VARCHAR(128), "TYPE1" VARCHAR(767), "TYPE2" VARCHAR(767)); + +CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "USER_PRIV" VARCHAR(128)); + +CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT 
NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); + +CREATE TABLE "APP"."PARTITION_EVENTS" ( + "PART_NAME_ID" BIGINT NOT NULL, + "CAT_NAME" VARCHAR(256), + "DB_NAME" VARCHAR(128), + "EVENT_TIME" BIGINT NOT NULL, + "EVENT_TYPE" INTEGER NOT NULL, + "PARTITION_NAME" VARCHAR(767), + "TBL_NAME" VARCHAR(256) +); + +CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128)); + +CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N'); + +CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128)); + +CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), "IS_COMPRESSED" CHAR(1) NOT NULL, "LOCATION" VARCHAR(4000), "NUM_BUCKETS" INTEGER NOT NULL, "OUTPUT_FORMAT" VARCHAR(4000), "SERDE_ID" BIGINT, "CD_ID" BIGINT); + +CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL); + +CREATE TABLE "APP"."TAB_COL_STATS"( + "CAT_NAME" VARCHAR(256) NOT NULL, + "DB_NAME" VARCHAR(128) NOT NULL, + "TABLE_NAME" VARCHAR(256) NOT NULL, + "COLUMN_NAME" VARCHAR(767) NOT NULL, + "COLUMN_TYPE" VARCHAR(128) NOT NULL, + "LONG_LOW_VALUE" BIGINT, + "LONG_HIGH_VALUE" BIGINT, + "DOUBLE_LOW_VALUE" DOUBLE, + "DOUBLE_HIGH_VALUE" DOUBLE, + "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), + "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000), + "NUM_DISTINCTS" BIGINT, + "NUM_NULLS" BIGINT NOT NULL, + "AVG_COL_LEN" DOUBLE, + "MAX_COL_LEN" BIGINT, + "NUM_TRUES" BIGINT, + "NUM_FALSES" BIGINT, + "LAST_ANALYZED" BIGINT, + "CS_ID" BIGINT NOT NULL, + "TBL_ID" BIGINT NOT NULL, + "BIT_VECTOR" BLOB +); + +CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB); + +CREATE TABLE "APP"."BUCKETING_COLS" ("SD_ID" BIGINT NOT NULL, "BUCKET_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."TYPE_FIELDS" ("TYPE_NAME" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "FIELD_NAME" VARCHAR(128) NOT NULL, "FIELD_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."NUCLEUS_TABLES" ("CLASS_NAME" VARCHAR(128) NOT NULL, "TABLE_NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(4) NOT NULL, "OWNER" VARCHAR(2) NOT NULL, "VERSION" VARCHAR(20) NOT NULL, "INTERFACE_NAME" VARCHAR(256) DEFAULT NULL); + +CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB); + +CREATE TABLE "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID" BIGINT NOT NULL); + +CREATE TABLE 
"APP"."SKEWED_STRING_LIST_VALUES" ("STRING_LIST_ID" BIGINT NOT NULL, "STRING_LIST_VALUE" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."SKEWED_COL_NAMES" ("SD_ID" BIGINT NOT NULL, "SKEWED_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ("SD_ID" BIGINT NOT NULL, "STRING_LIST_ID_KID" BIGINT NOT NULL, "LOCATION" VARCHAR(4000)); + +CREATE TABLE "APP"."SKEWED_VALUES" ("SD_ID_OID" BIGINT NOT NULL, "STRING_LIST_ID_EID" BIGINT NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as identity (start with 1), "MASTER_KEY" VARCHAR(767)); + +CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767)); + +CREATE TABLE "APP"."PART_COL_STATS"( + "CAT_NAME" VARCHAR(256) NOT NULL, + "DB_NAME" VARCHAR(128) NOT NULL, + "TABLE_NAME" VARCHAR(256) NOT NULL, + "PARTITION_NAME" VARCHAR(767) NOT NULL, + "COLUMN_NAME" VARCHAR(767) NOT NULL, + "COLUMN_TYPE" VARCHAR(128) NOT NULL, + "LONG_LOW_VALUE" BIGINT, + "LONG_HIGH_VALUE" BIGINT, + "DOUBLE_LOW_VALUE" DOUBLE, + "DOUBLE_HIGH_VALUE" DOUBLE, + "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), + "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000), + "NUM_DISTINCTS" BIGINT, + "BIT_VECTOR" BLOB, + "NUM_NULLS" BIGINT NOT NULL, + "AVG_COL_LEN" DOUBLE, + "MAX_COL_LEN" BIGINT, + "NUM_TRUES" BIGINT, + "NUM_FALSES" BIGINT, + "LAST_ANALYZED" BIGINT, + "CS_ID" BIGINT NOT NULL, + "PART_ID" BIGINT NOT NULL +); + +CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255)); + +CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000), "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "FUNC_NAME" VARCHAR(128), "FUNC_TYPE" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10)); + +CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."NOTIFICATION_LOG" ( + "NL_ID" BIGINT NOT NULL, + "CAT_NAME" VARCHAR(256), + "DB_NAME" VARCHAR(128), + "EVENT_ID" BIGINT NOT NULL, + "EVENT_TIME" INTEGER NOT NULL, + "EVENT_TYPE" VARCHAR(32) NOT NULL, + "MESSAGE" CLOB, + "TBL_NAME" VARCHAR(256), + "MESSAGE_FORMAT" VARCHAR(16) +); + +CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL); + +CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" INTEGER, "CHILD_TBL_ID" BIGINT, "PARENT_CD_ID" BIGINT , "PARENT_INTEGER_IDX" INTEGER, "PARENT_TBL_ID" BIGINT NOT NULL, "POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL, "DEFAULT_VALUE" VARCHAR(400)); + +CREATE TABLE "APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY" VARCHAR(255) NOT NULL, "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, "DESCRIPTION" VARCHAR(1000)); + +CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL, DEFAULT_POOL_ID BIGINT); + +CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024)); + +CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), 
ACTION_EXPRESSION VARCHAR(1024), IS_IN_UNMANAGED INTEGER NOT NULL DEFAULT 0); + +CREATE TABLE "APP"."WM_POOL_TO_TRIGGER" (POOL_ID BIGINT NOT NULL, TRIGGER_ID BIGINT NOT NULL); + +CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, ENTITY_TYPE VARCHAR(128) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID BIGINT, ORDERING INTEGER); + +CREATE TABLE "APP"."MV_CREATION_METADATA" ( + "MV_CREATION_METADATA_ID" BIGINT NOT NULL, + "CAT_NAME" VARCHAR(256) NOT NULL, + "DB_NAME" VARCHAR(128) NOT NULL, + "TBL_NAME" VARCHAR(256) NOT NULL, + "TXN_LIST" CLOB +); + +CREATE TABLE "APP"."MV_TABLES_USED" ( + "MV_CREATION_METADATA_ID" BIGINT NOT NULL, + "TBL_ID" BIGINT NOT NULL +); + +CREATE TABLE "APP"."CTLGS" ( + "CTLG_ID" BIGINT NOT NULL, + "NAME" VARCHAR(256) UNIQUE, + "DESC" VARCHAR(4000), + "LOCATION_URI" VARCHAR(4000) NOT NULL); + +-- ---------------------------------------------- +-- DML Statements +-- ---------------------------------------------- + +INSERT INTO "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT * FROM (VALUES (1,1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "APP"."NOTIFICATION_SEQUENCE"); + +INSERT INTO "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") SELECT * FROM (VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_VAL" FROM "APP"."SEQUENCE_TABLE" WHERE "SEQUENCE_NAME" = 'org.apache.hadoop.hive.metastore.model.MNotificationLog'); + +-- ---------------------------------------------- +-- DDL Statements for indexes +-- ---------------------------------------------- + +CREATE UNIQUE INDEX "APP"."UNIQUEINDEX" ON "APP"."IDXS" ("INDEX_NAME", "ORIG_TBL_ID"); + +CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); + +CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME"); + +CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME", "CTLG_NAME"); + +CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."GLOBALPRIVILEGEINDEX" ON "APP"."GLOBAL_PRIVS" ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_TYPE" ON "APP"."TYPES" ("TYPE_NAME"); + +CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("PART_NAME", "TBL_ID"); + +CREATE UNIQUE INDEX "APP"."UNIQUEFUNCTION" ON "APP"."FUNCS" ("FUNC_NAME", "DB_ID"); + +CREATE INDEX "APP"."FUNCS_N49" ON "APP"."FUNCS" ("DB_ID"); + +CREATE INDEX 
"APP"."FUNC_RU_N49" ON "APP"."FUNC_RU" ("FUNC_ID"); + +CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID"); + +CREATE INDEX "APP"."CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "APP"."KEY_CONSTRAINTS"("CONSTRAINT_TYPE"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" ("NAME"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_POOL" ON "APP"."WM_POOL" ("RP_ID", "PATH"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_TRIGGER" ON "APP"."WM_TRIGGER" ("RP_ID", "NAME"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_MAPPING" ON "APP"."WM_MAPPING" ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME"); + +CREATE UNIQUE INDEX "APP"."MV_UNIQUE_TABLE" ON "APP"."MV_CREATION_METADATA" ("TBL_NAME", "DB_NAME"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_CATALOG" ON "APP"."CTLGS" ("NAME"); + + +-- ---------------------------------------------- +-- DDL Statements for keys +-- ---------------------------------------------- + +-- primary/unique +ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_PK" PRIMARY KEY ("INDEX_ID"); + +ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_PK" PRIMARY KEY ("TBL_COLUMN_GRANT_ID"); + +ALTER TABLE "APP"."CDS" ADD CONSTRAINT "SQL110922153006460" PRIMARY KEY ("CD_ID"); + +ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_PK" PRIMARY KEY ("DB_GRANT_ID"); + +ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_PK" PRIMARY KEY ("INDEX_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEY_PK" PRIMARY KEY ("TBL_ID", "PKEY_NAME"); + +ALTER TABLE "APP"."SEQUENCE_TABLE" ADD CONSTRAINT "SEQUENCE_TABLE_PK" PRIMARY KEY ("SEQUENCE_NAME"); + +ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_PK" PRIMARY KEY ("PART_GRANT_ID"); + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_PK" PRIMARY KEY ("SD_ID"); + +ALTER TABLE "APP"."SERDES" ADD CONSTRAINT "SERDES_PK" PRIMARY KEY ("SERDE_ID"); + +ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_PK" PRIMARY KEY ("SD_ID", "COLUMN_NAME"); + +ALTER TABLE "APP"."PARTITION_EVENTS" ADD CONSTRAINT "PARTITION_EVENTS_PK" PRIMARY KEY ("PART_NAME_ID"); + +ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_PK" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME"); + +ALTER TABLE "APP"."ROLES" ADD CONSTRAINT "ROLES_PK" PRIMARY KEY ("ROLE_ID"); + +ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_PK" PRIMARY KEY ("TBL_GRANT_ID"); + +ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_PK" PRIMARY KEY ("SERDE_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."NUCLEUS_TABLES" ADD CONSTRAINT "NUCLEUS_TABLES_PK" PRIMARY KEY ("CLASS_NAME"); + +ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_PK" PRIMARY KEY ("TBL_ID"); + +ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_PK" PRIMARY KEY ("SD_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_PK" PRIMARY KEY ("DB_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_PK" PRIMARY KEY ("DB_ID"); + +ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_PK" PRIMARY KEY ("ROLE_GRANT_ID"); + +ALTER TABLE "APP"."GLOBAL_PRIVS" ADD CONSTRAINT "GLOBAL_PRIVS_PK" PRIMARY KEY ("USER_GRANT_ID"); + +ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_PK" PRIMARY KEY ("PART_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."TYPES" ADD CONSTRAINT "TYPES_PK" PRIMARY KEY 
("TYPES_ID"); + +ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "SQL110922153006740" PRIMARY KEY ("CD_ID", "COLUMN_NAME"); + +ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_PK" PRIMARY KEY ("PART_COLUMN_GRANT_ID"); + +ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_PK" PRIMARY KEY ("PART_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_PK" PRIMARY KEY ("PART_ID"); + +ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_PK" PRIMARY KEY ("TBL_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."SKEWED_STRING_LIST" ADD CONSTRAINT "SKEWED_STRING_LIST_PK" PRIMARY KEY ("STRING_LIST_ID"); + +ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_PK" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_PK" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID"); + +ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_PK" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX"); + +ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_PK" PRIMARY KEY ("CS_ID"); + +ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_PK" PRIMARY KEY ("CS_ID"); + +ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_PK" PRIMARY KEY ("FUNC_ID"); + +ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_PK" PRIMARY KEY ("FUNC_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."NOTIFICATION_LOG" ADD CONSTRAINT "NOTIFICATION_LOG_PK" PRIMARY KEY ("NL_ID"); + +ALTER TABLE "APP"."NOTIFICATION_SEQUENCE" ADD CONSTRAINT "NOTIFICATION_SEQUENCE_PK" PRIMARY KEY ("NNI_ID"); + +ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY ("CONSTRAINT_NAME", "POSITION"); + +ALTER TABLE "APP"."METASTORE_DB_PROPERTIES" ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY"); + +ALTER TABLE "APP"."MV_CREATION_METADATA" ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID"); + +ALTER TABLE "APP"."CTLGS" ADD CONSTRAINT "CTLG_PK" PRIMARY KEY ("CTLG_ID"); + + +-- foreign +ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK3" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_FK1" FOREIGN KEY ("INDEX_ID") REFERENCES "APP"."IDXS" ("INDEX_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEYS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT 
"SDS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK2" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_FK1" FOREIGN KEY ("TYPE_NAME") REFERENCES "APP"."TYPES" ("TYPES_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_FK1" FOREIGN KEY ("ROLE_ID") REFERENCES "APP"."ROLES" ("ROLE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "COLUMNS_V2_FK1" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_FK1" FOREIGN KEY ("STRING_LIST_ID") 
REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK1" FOREIGN KEY ("SD_ID_OID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK2" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_FK" FOREIGN KEY ("TBL_ID") REFERENCES TBLS("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_FK" FOREIGN KEY ("PART_ID") REFERENCES PARTITIONS("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."VERSION" ADD CONSTRAINT "VERSION_PK" PRIMARY KEY ("VER_ID"); + +ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "APP"."FUNCS" ("FUNC_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_PK" PRIMARY KEY ("RP_ID"); + +ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID"); + +ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_PK" PRIMARY KEY ("TRIGGER_ID"); + +ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "APP"."WM_TRIGGER" ("TRIGGER_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_PK" PRIMARY KEY ("MAPPING_ID"); + +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN KEY ("MV_CREATION_METADATA_ID") REFERENCES 
"APP"."MV_CREATION_METADATA" ("MV_CREATION_METADATA_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_CTLG_FK" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION; + +-- ---------------------------------------------- +-- DDL Statements for checks +-- ---------------------------------------------- + +ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "SQL110318025504980" CHECK (DEFERRED_REBUILD IN ('Y','N')); + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SQL110318025505550" CHECK (IS_COMPRESSED IN ('Y','N')); + +-- ---------------------------- +-- Transaction and Lock Tables +-- ---------------------------- +CREATE TABLE TXNS ( + TXN_ID bigint PRIMARY KEY, + TXN_STATE char(1) NOT NULL, + TXN_STARTED bigint NOT NULL, + TXN_LAST_HEARTBEAT bigint NOT NULL, + TXN_USER varchar(128) NOT NULL, + TXN_HOST varchar(128) NOT NULL, + TXN_AGENT_INFO varchar(128), + TXN_META_INFO varchar(128), + TXN_HEARTBEAT_COUNT integer, + TXN_TYPE integer +); + +CREATE TABLE TXN_COMPONENTS ( + TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID), + TC_DATABASE varchar(128) NOT NULL, + TC_TABLE varchar(128), + TC_PARTITION varchar(767), + TC_OPERATION_TYPE char(1) NOT NULL, + TC_WRITEID bigint +); + +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); + +CREATE TABLE COMPLETED_TXN_COMPONENTS ( + CTC_TXNID bigint NOT NULL, + CTC_DATABASE varchar(128) NOT NULL, + CTC_TABLE varchar(256), + CTC_PARTITION varchar(767), + CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL, + CTC_WRITEID bigint +); + +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +CREATE TABLE NEXT_TXN_ID ( + NTXN_NEXT bigint NOT NULL +); +INSERT INTO NEXT_TXN_ID VALUES(1); + +CREATE TABLE HIVE_LOCKS ( + HL_LOCK_EXT_ID bigint NOT NULL, + HL_LOCK_INT_ID bigint NOT NULL, + HL_TXNID bigint NOT NULL, + HL_DB varchar(128) NOT NULL, + HL_TABLE varchar(128), + HL_PARTITION varchar(767), + HL_LOCK_STATE char(1) NOT NULL, + HL_LOCK_TYPE char(1) NOT NULL, + HL_LAST_HEARTBEAT bigint NOT NULL, + HL_ACQUIRED_AT bigint, + HL_USER varchar(128) NOT NULL, + HL_HOST varchar(128) NOT NULL, + HL_HEARTBEAT_COUNT integer, + HL_AGENT_INFO varchar(128), + HL_BLOCKEDBY_EXT_ID bigint, + HL_BLOCKEDBY_INT_ID bigint, + PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID) +); + +CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID); + +CREATE TABLE NEXT_LOCK_ID ( + NL_NEXT bigint NOT NULL +); +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE COMPACTION_QUEUE ( + CQ_ID bigint PRIMARY KEY, + CQ_DATABASE varchar(128) NOT NULL, + CQ_TABLE varchar(128) NOT NULL, + CQ_PARTITION varchar(767), + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_TBLPROPERTIES varchar(2048), + CQ_WORKER_ID varchar(128), + CQ_START bigint, + CQ_RUN_AS varchar(128), + CQ_HIGHEST_WRITE_ID bigint, + CQ_META_INFO varchar(2048) for bit data, + CQ_HADOOP_JOB_ID varchar(32) +); + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( + NCQ_NEXT bigint NOT NULL +); +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID bigint PRIMARY KEY, + CC_DATABASE varchar(128) NOT NULL, + CC_TABLE varchar(128) NOT NULL, + CC_PARTITION varchar(767), + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_TBLPROPERTIES varchar(2048), + CC_WORKER_ID varchar(128), + CC_START 
bigint, + CC_END bigint, + CC_RUN_AS varchar(128), + CC_HIGHEST_WRITE_ID bigint, + CC_META_INFO varchar(2048) for bit data, + CC_HADOOP_JOB_ID varchar(32) +); + +CREATE TABLE AUX_TABLE ( + MT_KEY1 varchar(128) NOT NULL, + MT_KEY2 bigint NOT NULL, + MT_COMMENT varchar(255), + PRIMARY KEY(MT_KEY1, MT_KEY2) +); + +--1st 4 cols make up a PK but since WS_PARTITION is nullable we can't declare such PK +--This is a good candidate for Index organized table +CREATE TABLE WRITE_SET ( + WS_DATABASE varchar(128) NOT NULL, + WS_TABLE varchar(128) NOT NULL, + WS_PARTITION varchar(767), + WS_TXNID bigint NOT NULL, + WS_COMMIT_ID bigint NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +); + +CREATE TABLE TXN_TO_WRITE_ID ( + T2W_TXNID bigint NOT NULL, + T2W_DATABASE varchar(128) NOT NULL, + T2W_TABLE varchar(256) NOT NULL, + T2W_WRITEID bigint NOT NULL +); + +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); + +CREATE TABLE NEXT_WRITE_ID ( + NWI_DATABASE varchar(128) NOT NULL, + NWI_TABLE varchar(256) NOT NULL, + NWI_NEXT bigint NOT NULL +); + +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); + +CREATE TABLE MIN_HISTORY_LEVEL ( + MHL_TXNID bigint NOT NULL, + MHL_MIN_OPEN_TXNID bigint NOT NULL, + PRIMARY KEY(MHL_TXNID) +); + +CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID); + +CREATE TABLE "APP"."I_SCHEMA" ( + "SCHEMA_ID" bigint primary key, + "SCHEMA_TYPE" integer not null, + "NAME" varchar(256) unique, + "DB_ID" bigint references "APP"."DBS" ("DB_ID"), + "COMPATIBILITY" integer not null, + "VALIDATION_LEVEL" integer not null, + "CAN_EVOLVE" char(1) not null, + "SCHEMA_GROUP" varchar(256), + "DESCRIPTION" varchar(4000) +); + +CREATE TABLE "APP"."SCHEMA_VERSION" ( + "SCHEMA_VERSION_ID" bigint primary key, + "SCHEMA_ID" bigint references "APP"."I_SCHEMA" ("SCHEMA_ID"), + "VERSION" integer not null, + "CREATED_AT" bigint not null, + "CD_ID" bigint references "APP"."CDS" ("CD_ID"), + "STATE" integer not null, + "DESCRIPTION" varchar(4000), + "SCHEMA_TEXT" clob, + "FINGERPRINT" varchar(256), + "SCHEMA_VERSION_NAME" varchar(256), + "SERDE_ID" bigint references "APP"."SERDES" ("SERDE_ID") +); + +CREATE UNIQUE INDEX "APP"."UNIQUE_SCHEMA_VERSION" ON "APP"."SCHEMA_VERSION" ("SCHEMA_ID", "VERSION"); + +CREATE TABLE REPL_TXN_MAP ( + RTM_REPL_POLICY varchar(256) NOT NULL, + RTM_SRC_TXN_ID bigint NOT NULL, + RTM_TARGET_TXN_ID bigint NOT NULL, + PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID) +); + +CREATE TABLE "APP"."RUNTIME_STATS" ( + "RS_ID" bigint primary key, + "CREATE_TIME" integer not null, + "WEIGHT" integer not null, + "PAYLOAD" BLOB +); + +CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME); + +-- ----------------------------------------------------------------- +-- Record schema version.
Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0'); diff --git standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql new file mode 100644 index 0000000000..a3b424807f --- /dev/null +++ standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql @@ -0,0 +1,1246 @@ +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements.  See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License.  You may obtain a copy of the License at +-- +--     http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. + +------------------------------------------------------------------ +-- DataNucleus SchemaTool (ran at 08/04/2014 15:10:15) +------------------------------------------------------------------ +-- Complete schema required for the following classes:- +--     org.apache.hadoop.hive.metastore.model.MColumnDescriptor +--     org.apache.hadoop.hive.metastore.model.MDBPrivilege +--     org.apache.hadoop.hive.metastore.model.MDatabase +--     org.apache.hadoop.hive.metastore.model.MDelegationToken +--     org.apache.hadoop.hive.metastore.model.MFieldSchema +--     org.apache.hadoop.hive.metastore.model.MFunction +--     org.apache.hadoop.hive.metastore.model.MGlobalPrivilege +--     org.apache.hadoop.hive.metastore.model.MIndex +--     org.apache.hadoop.hive.metastore.model.MMasterKey +--     org.apache.hadoop.hive.metastore.model.MOrder +--     org.apache.hadoop.hive.metastore.model.MPartition +--     org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege +--     org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics +--     org.apache.hadoop.hive.metastore.model.MPartitionEvent +--     org.apache.hadoop.hive.metastore.model.MPartitionPrivilege +--     org.apache.hadoop.hive.metastore.model.MResourceUri +--     org.apache.hadoop.hive.metastore.model.MRole +--     org.apache.hadoop.hive.metastore.model.MRoleMap +--     org.apache.hadoop.hive.metastore.model.MSerDeInfo +--     org.apache.hadoop.hive.metastore.model.MStorageDescriptor +--     org.apache.hadoop.hive.metastore.model.MStringList +--     org.apache.hadoop.hive.metastore.model.MTable +--     org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege +--     org.apache.hadoop.hive.metastore.model.MTableColumnStatistics +--     org.apache.hadoop.hive.metastore.model.MTablePrivilege +--     org.apache.hadoop.hive.metastore.model.MType +--     org.apache.hadoop.hive.metastore.model.MVersionTable +-- +-- Table MASTER_KEYS for classes [org.apache.hadoop.hive.metastore.model.MMasterKey] +CREATE TABLE MASTER_KEYS +( +    KEY_ID int NOT NULL, +    MASTER_KEY nvarchar(767) NULL +); + +ALTER TABLE MASTER_KEYS ADD CONSTRAINT MASTER_KEYS_PK PRIMARY KEY (KEY_ID); + +-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex] +CREATE TABLE IDXS +( +    INDEX_ID bigint NOT NULL, +    CREATE_TIME int NOT NULL, +
DEFERRED_REBUILD bit NOT NULL, + INDEX_HANDLER_CLASS nvarchar(4000) NULL, + INDEX_NAME nvarchar(128) NULL, + INDEX_TBL_ID bigint NULL, + LAST_ACCESS_TIME int NOT NULL, + ORIG_TBL_ID bigint NULL, + SD_ID bigint NULL +); + +ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID); + +-- Table PART_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics] +CREATE TABLE PART_COL_STATS +( + CS_ID bigint NOT NULL, + AVG_COL_LEN float NULL, + "COLUMN_NAME" nvarchar(767) NOT NULL, + COLUMN_TYPE nvarchar(128) NOT NULL, + DB_NAME nvarchar(128) NOT NULL, + BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL, + BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL, + DOUBLE_HIGH_VALUE float NULL, + DOUBLE_LOW_VALUE float NULL, + LAST_ANALYZED bigint NOT NULL, + LONG_HIGH_VALUE bigint NULL, + LONG_LOW_VALUE bigint NULL, + MAX_COL_LEN bigint NULL, + NUM_DISTINCTS bigint NULL, + BIT_VECTOR varbinary(max) NULL, + NUM_FALSES bigint NULL, + NUM_NULLS bigint NOT NULL, + NUM_TRUES bigint NULL, + PART_ID bigint NULL, + PARTITION_NAME nvarchar(767) NOT NULL, + "TABLE_NAME" nvarchar(256) NOT NULL, + "CAT_NAME" nvarchar(256) NOT NULL +); + +ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID); + +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); + +-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege] +CREATE TABLE PART_PRIVS +( + PART_GRANT_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PART_ID bigint NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + PART_PRIV nvarchar(128) NULL +); + +ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID); + +-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList] +CREATE TABLE SKEWED_STRING_LIST +( + STRING_LIST_ID bigint NOT NULL +); + +ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID); + +-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole] +CREATE TABLE ROLES +( + ROLE_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + OWNER_NAME nvarchar(128) NULL, + ROLE_NAME nvarchar(128) NULL +); + +ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID); + +-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition] +CREATE TABLE PARTITIONS +( + PART_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + LAST_ACCESS_TIME int NOT NULL, + PART_NAME nvarchar(767) NULL, + SD_ID bigint NULL, + TBL_ID bigint NULL +); + +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID); + +-- Table CDS for classes [org.apache.hadoop.hive.metastore.model.MColumnDescriptor] +CREATE TABLE CDS +( + CD_ID bigint NOT NULL +); + +ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID); + +-- Table VERSION for classes [org.apache.hadoop.hive.metastore.model.MVersionTable] +CREATE TABLE VERSION +( + VER_ID bigint NOT NULL, + SCHEMA_VERSION nvarchar(127) NOT NULL, + VERSION_COMMENT nvarchar(255) NOT NULL +); + +ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID); + +-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege] +CREATE TABLE GLOBAL_PRIVS +( + USER_GRANT_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) 
NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + USER_PRIV nvarchar(128) NULL +); + +ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID); + +-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege] +CREATE TABLE PART_COL_PRIVS +( + PART_COLUMN_GRANT_ID bigint NOT NULL, + "COLUMN_NAME" nvarchar(767) NULL, + CREATE_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PART_ID bigint NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + PART_COL_PRIV nvarchar(128) NULL +); + +ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID); + +-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege] +CREATE TABLE DB_PRIVS +( + DB_GRANT_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + DB_ID bigint NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + DB_PRIV nvarchar(128) NULL +); + +ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID); + +-- Table TAB_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics] +CREATE TABLE TAB_COL_STATS +( + CS_ID bigint NOT NULL, + AVG_COL_LEN float NULL, + "COLUMN_NAME" nvarchar(767) NOT NULL, + COLUMN_TYPE nvarchar(128) NOT NULL, + DB_NAME nvarchar(128) NOT NULL, + BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL, + BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL, + DOUBLE_HIGH_VALUE float NULL, + DOUBLE_LOW_VALUE float NULL, + LAST_ANALYZED bigint NOT NULL, + LONG_HIGH_VALUE bigint NULL, + LONG_LOW_VALUE bigint NULL, + MAX_COL_LEN bigint NULL, + NUM_DISTINCTS bigint NULL, + BIT_VECTOR varbinary(max) NULL, + NUM_FALSES bigint NULL, + NUM_NULLS bigint NOT NULL, + NUM_TRUES bigint NULL, + TBL_ID bigint NULL, + "TABLE_NAME" nvarchar(256) NOT NULL, + "CAT_NAME" nvarchar(256) NOT NULL +); + +ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID); + +-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType] +CREATE TABLE TYPES +( + TYPES_ID bigint NOT NULL, + TYPE_NAME nvarchar(128) NULL, + TYPE1 nvarchar(767) NULL, + TYPE2 nvarchar(767) NULL +); + +ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID); + +-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege] +CREATE TABLE TBL_PRIVS +( + TBL_GRANT_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + TBL_PRIV nvarchar(128) NULL, + TBL_ID bigint NULL +); + +ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID); + +-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase] +CREATE TABLE DBS +( + DB_ID bigint NOT NULL, + "DESC" nvarchar(4000) NULL, + DB_LOCATION_URI nvarchar(4000) NOT NULL, + "NAME" nvarchar(128) NULL, + OWNER_NAME nvarchar(128) NULL, + OWNER_TYPE nvarchar(10) NULL, + CTLG_NAME nvarchar(256) +); + +ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID); + +-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege] +CREATE TABLE 
TBL_COL_PRIVS +( + TBL_COLUMN_GRANT_ID bigint NOT NULL, + "COLUMN_NAME" nvarchar(767) NULL, + CREATE_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + TBL_COL_PRIV nvarchar(128) NULL, + TBL_ID bigint NULL +); + +ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID); + +-- Table DELEGATION_TOKENS for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken] +CREATE TABLE DELEGATION_TOKENS +( + TOKEN_IDENT nvarchar(767) NOT NULL, + TOKEN nvarchar(767) NULL +); + +ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (TOKEN_IDENT); + +-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo] +CREATE TABLE SERDES +( + SERDE_ID bigint NOT NULL, + "NAME" nvarchar(128) NULL, + SLIB nvarchar(4000) NULL, + "DESCRIPTION" nvarchar(4000), + "SERIALIZER_CLASS" nvarchar(4000), + "DESERIALIZER_CLASS" nvarchar(4000), + "SERDE_TYPE" int +); + +ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID); + +-- Table FUNCS for classes [org.apache.hadoop.hive.metastore.model.MFunction] +CREATE TABLE FUNCS +( + FUNC_ID bigint NOT NULL, + CLASS_NAME nvarchar(4000) NULL, + CREATE_TIME int NOT NULL, + DB_ID bigint NULL, + FUNC_NAME nvarchar(128) NULL, + FUNC_TYPE int NOT NULL, + OWNER_NAME nvarchar(128) NULL, + OWNER_TYPE nvarchar(10) NULL +); + +ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID); + +-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap] +CREATE TABLE ROLE_MAP +( + ROLE_GRANT_ID bigint NOT NULL, + ADD_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + ROLE_ID bigint NULL +); + +ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID); + +-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable] +CREATE TABLE TBLS +( + TBL_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + DB_ID bigint NULL, + LAST_ACCESS_TIME int NOT NULL, + OWNER nvarchar(767) NULL, + OWNER_TYPE nvarchar(10) NULL, + RETENTION int NOT NULL, + SD_ID bigint NULL, + TBL_NAME nvarchar(256) NULL, + TBL_TYPE nvarchar(128) NULL, + VIEW_EXPANDED_TEXT text NULL, + VIEW_ORIGINAL_TEXT text NULL, + IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0 +); + +ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); + +-- Table MV_CREATION_METADATA for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata] +CREATE TABLE MV_CREATION_METADATA +( + MV_CREATION_METADATA_ID bigint NOT NULL, + CAT_NAME nvarchar(256) NOT NULL, + DB_NAME nvarchar(128) NOT NULL, + TBL_NAME nvarchar(256) NOT NULL, + TXN_LIST text NULL +); + +ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID); +CREATE INDEX MV_UNIQUE_TABLE ON MV_CREATION_METADATA (TBL_NAME,DB_NAME); + + +CREATE TABLE MV_TABLES_USED +( + MV_CREATION_METADATA_ID bigint NOT NULL, + TBL_ID bigint NOT NULL +); + +ALTER TABLE MV_TABLES_USED WITH CHECK ADD FOREIGN KEY(MV_CREATION_METADATA_ID) REFERENCES MV_CREATION_METADATA (MV_CREATION_METADATA_ID); +ALTER TABLE MV_TABLES_USED WITH CHECK ADD FOREIGN KEY(TBL_ID) REFERENCES TBLS (TBL_ID); + +-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor] +CREATE TABLE 
SDS +( + SD_ID bigint NOT NULL, + CD_ID bigint NULL, + INPUT_FORMAT nvarchar(4000) NULL, + IS_COMPRESSED bit NOT NULL, + LOCATION nvarchar(4000) NULL, + NUM_BUCKETS int NOT NULL, + OUTPUT_FORMAT nvarchar(4000) NULL, + SERDE_ID bigint NULL +); + +ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID); + +-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent] +CREATE TABLE PARTITION_EVENTS +( + PART_NAME_ID bigint NOT NULL, + CAT_NAME nvarchar(256) NULL, + DB_NAME nvarchar(128) NULL, + EVENT_TIME bigint NOT NULL, + EVENT_TYPE int NOT NULL, + PARTITION_NAME nvarchar(767) NULL, + TBL_NAME nvarchar(256) NULL +); + +ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID); + +-- Table SORT_COLS for join relationship +CREATE TABLE SORT_COLS +( + SD_ID bigint NOT NULL, + "COLUMN_NAME" nvarchar(767) NULL, + "ORDER" int NOT NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +-- Table SKEWED_COL_NAMES for join relationship +CREATE TABLE SKEWED_COL_NAMES +( + SD_ID bigint NOT NULL, + SKEWED_COL_NAME nvarchar(255) NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +-- Table SKEWED_COL_VALUE_LOC_MAP for join relationship +CREATE TABLE SKEWED_COL_VALUE_LOC_MAP +( + SD_ID bigint NOT NULL, + STRING_LIST_ID_KID bigint NOT NULL, + LOCATION nvarchar(4000) NULL +); + +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID); + +-- Table SKEWED_STRING_LIST_VALUES for join relationship +CREATE TABLE SKEWED_STRING_LIST_VALUES +( + STRING_LIST_ID bigint NOT NULL, + STRING_LIST_VALUE nvarchar(255) NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX); + +-- Table PARTITION_KEY_VALS for join relationship +CREATE TABLE PARTITION_KEY_VALS +( + PART_ID bigint NOT NULL, + PART_KEY_VAL nvarchar(255) NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX); + +-- Table PARTITION_KEYS for join relationship +CREATE TABLE PARTITION_KEYS +( + TBL_ID bigint NOT NULL, + PKEY_COMMENT nvarchar(4000) NULL, + PKEY_NAME nvarchar(128) NOT NULL, + PKEY_TYPE nvarchar(767) NOT NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME); + +-- Table SKEWED_VALUES for join relationship +CREATE TABLE SKEWED_VALUES +( + SD_ID_OID bigint NOT NULL, + STRING_LIST_ID_EID bigint NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX); + +-- Table SD_PARAMS for join relationship +CREATE TABLE SD_PARAMS +( + SD_ID bigint NOT NULL, + PARAM_KEY nvarchar(256) NOT NULL, + PARAM_VALUE varchar(max) NULL +); + +ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY); + +-- Table FUNC_RU for join relationship +CREATE TABLE FUNC_RU +( + FUNC_ID bigint NOT NULL, + RESOURCE_TYPE int NOT NULL, + RESOURCE_URI nvarchar(4000) NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID,INTEGER_IDX); + +-- Table TYPE_FIELDS for join relationship +CREATE TABLE TYPE_FIELDS +( + TYPE_NAME bigint NOT NULL, + COMMENT nvarchar(256) NULL, + FIELD_NAME nvarchar(128) NOT 
NULL, + FIELD_TYPE nvarchar(767) NOT NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME); + +-- Table BUCKETING_COLS for join relationship +CREATE TABLE BUCKETING_COLS +( + SD_ID bigint NOT NULL, + BUCKET_COL_NAME nvarchar(255) NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +-- Table DATABASE_PARAMS for join relationship +CREATE TABLE DATABASE_PARAMS +( + DB_ID bigint NOT NULL, + PARAM_KEY nvarchar(180) NOT NULL, + PARAM_VALUE nvarchar(4000) NULL +); + +ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY); + +-- Table INDEX_PARAMS for join relationship +CREATE TABLE INDEX_PARAMS +( + INDEX_ID bigint NOT NULL, + PARAM_KEY nvarchar(256) NOT NULL, + PARAM_VALUE nvarchar(4000) NULL +); + +ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY); + +-- Table COLUMNS_V2 for join relationship +CREATE TABLE COLUMNS_V2 +( + CD_ID bigint NOT NULL, + COMMENT nvarchar(256) NULL, + "COLUMN_NAME" nvarchar(767) NOT NULL, + TYPE_NAME varchar(max) NOT NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_PK PRIMARY KEY (CD_ID,"COLUMN_NAME"); + +-- Table SERDE_PARAMS for join relationship +CREATE TABLE SERDE_PARAMS +( + SERDE_ID bigint NOT NULL, + PARAM_KEY nvarchar(256) NOT NULL, + PARAM_VALUE varchar(max) NULL +); + +ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY); + +-- Table PARTITION_PARAMS for join relationship +CREATE TABLE PARTITION_PARAMS +( + PART_ID bigint NOT NULL, + PARAM_KEY nvarchar(256) NOT NULL, + PARAM_VALUE nvarchar(4000) NULL +); + +ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY); + +-- Table TABLE_PARAMS for join relationship +CREATE TABLE TABLE_PARAMS +( + TBL_ID bigint NOT NULL, + PARAM_KEY nvarchar(256) NOT NULL, + PARAM_VALUE varchar(max) NULL +); + +ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY); + +CREATE TABLE NOTIFICATION_LOG +( + NL_ID bigint NOT NULL, + EVENT_ID bigint NOT NULL, + EVENT_TIME int NOT NULL, + EVENT_TYPE nvarchar(32) NOT NULL, + CAT_NAME nvarchar(256) NULL, + DB_NAME nvarchar(128) NULL, + TBL_NAME nvarchar(256) NULL, + MESSAGE_FORMAT nvarchar(16), + MESSAGE text NULL +); + +ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID); + +CREATE TABLE NOTIFICATION_SEQUENCE +( + NNI_ID bigint NOT NULL, + NEXT_EVENT_ID bigint NOT NULL +); + +ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID); + +-- Tables to manage resource plans. 
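The WM_* tables that follow back Hive's workload management: a resource plan (WM_RESOURCEPLAN) owns pools (WM_POOL), triggers (WM_TRIGGER), and entity mappings (WM_MAPPING), with WM_POOL_TO_TRIGGER joining pools to triggers. As a rough sketch of how rows relate once this DDL is applied (every id, name, and expression below is hypothetical, not part of the patch):

-- Create a disabled plan, give it one pool, and wire a kill trigger to that pool.
INSERT INTO WM_RESOURCEPLAN (RP_ID, "NAME", QUERY_PARALLELISM, STATUS) VALUES (1, 'daily_etl', 4, 'DISABLED');
INSERT INTO WM_POOL (POOL_ID, RP_ID, PATH, ALLOC_FRACTION, QUERY_PARALLELISM) VALUES (10, 1, 'etl', 0.75, 2);
UPDATE WM_RESOURCEPLAN SET DEFAULT_POOL_ID = 10 WHERE RP_ID = 1;
INSERT INTO WM_TRIGGER (TRIGGER_ID, RP_ID, "NAME", TRIGGER_EXPRESSION, ACTION_EXPRESSION) VALUES (100, 1, 'slow_query', 'ELAPSED_TIME > 60000', 'KILL');
INSERT INTO WM_POOL_TO_TRIGGER (POOL_ID, TRIGGER_ID) VALUES (10, 100);

Note the two-step handling of the default pool: WM_RESOURCEPLAN.DEFAULT_POOL_ID references WM_POOL while WM_POOL.RP_ID references WM_RESOURCEPLAN, so the plan has to be inserted with a NULL default pool and updated once the pool row exists.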
+ +CREATE TABLE WM_RESOURCEPLAN +( + RP_ID bigint NOT NULL, + "NAME" nvarchar(128) NOT NULL, + QUERY_PARALLELISM int, + STATUS nvarchar(20) NOT NULL, + DEFAULT_POOL_ID bigint +); + +ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID); + +CREATE TABLE WM_POOL +( + POOL_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + PATH nvarchar(1024) NOT NULL, + ALLOC_FRACTION float, + QUERY_PARALLELISM int, + SCHEDULING_POLICY nvarchar(1024) +); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID); + +CREATE TABLE WM_TRIGGER +( + TRIGGER_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + "NAME" nvarchar(128) NOT NULL, + TRIGGER_EXPRESSION nvarchar(1024), + ACTION_EXPRESSION nvarchar(1024), + IS_IN_UNMANAGED bit NOT NULL DEFAULT 0 +); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID); + +CREATE TABLE WM_POOL_TO_TRIGGER +( + POOL_ID bigint NOT NULL, + TRIGGER_ID bigint NOT NULL +); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID); + +CREATE TABLE WM_MAPPING +( + MAPPING_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + ENTITY_TYPE nvarchar(128) NOT NULL, + ENTITY_NAME nvarchar(128) NOT NULL, + POOL_ID bigint, + ORDERING int +); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID); + +CREATE TABLE CTLGS ( + CTLG_ID bigint primary key, + "NAME" nvarchar(256), + "DESC" nvarchar(4000), + LOCATION_URI nvarchar(4000) not null +); + +CREATE UNIQUE INDEX UNIQUE_CTLG ON CTLGS ("NAME"); + +-- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey] + +-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex] +ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) ; + +ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID); + +CREATE INDEX IDXS_N51 ON IDXS (SD_ID); + +CREATE INDEX IDXS_N50 ON IDXS (ORIG_TBL_ID); + +CREATE INDEX IDXS_N49 ON IDXS (INDEX_TBL_ID); + + +-- Constraints for table PART_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics] +ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ; + +CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID); + + +-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege] +ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ; + +CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID); + + +-- Constraints for table SKEWED_STRING_LIST for class(es) [org.apache.hadoop.hive.metastore.model.MStringList] + +-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole] +CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME); + + +-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition] +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +CREATE INDEX PARTITIONS_N49 ON 
PARTITIONS (SD_ID); + +CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID); + +CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID); + + +-- Constraints for table CDS for class(es) [org.apache.hadoop.hive.metastore.model.MColumnDescriptor] + +-- Constraints for table VERSION for class(es) [org.apache.hadoop.hive.metastore.model.MVersionTable] + +-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege] +CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege] +ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ; + +CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID); + +CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege] +ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ; + +CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID); + + +-- Constraints for table TAB_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics] +ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS (TBL_ID); + + +-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType] +CREATE UNIQUE INDEX UNIQUETYPE ON TYPES (TYPE_NAME); + + +-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege] +ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID); + +CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase] +CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME", "CTLG_NAME"); + + +-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege] +ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID); + + +-- Constraints for table DELEGATION_TOKENS for class(es) [org.apache.hadoop.hive.metastore.model.MDelegationToken] + +-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo] + +-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunction] +ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ; + +CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME,DB_ID); + +CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID); + + +-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap] +ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) 
REFERENCES ROLES (ROLE_ID) ; + +CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID); + +CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable] +ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ; + +CREATE INDEX TBLS_N50 ON TBLS (SD_ID); + +CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID); + +CREATE INDEX TBLS_N49 ON TBLS (DB_ID); + + +-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor] +ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ; + +ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ; + +CREATE INDEX SDS_N50 ON SDS (CD_ID); + +CREATE INDEX SDS_N49 ON SDS (SERDE_ID); + + +-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent] +CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME); + + +-- Constraints for table SORT_COLS +ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID); + + +-- Constraints for table SKEWED_COL_NAMES +ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +CREATE INDEX SKEWED_COL_NAMES_N49 ON SKEWED_COL_NAMES (SD_ID); + + +-- Constraints for table SKEWED_COL_VALUE_LOC_MAP +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ; + +CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N50 ON SKEWED_COL_VALUE_LOC_MAP (STRING_LIST_ID_KID); + +CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N49 ON SKEWED_COL_VALUE_LOC_MAP (SD_ID); + + +-- Constraints for table SKEWED_STRING_LIST_VALUES +ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ; + +CREATE INDEX SKEWED_STRING_LIST_VALUES_N49 ON SKEWED_STRING_LIST_VALUES (STRING_LIST_ID); + + +-- Constraints for table PARTITION_KEY_VALS +ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ; + +CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID); + + +-- Constraints for table PARTITION_KEYS +ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID); + + +-- Constraints for table SKEWED_VALUES +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) ; + +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ; + +CREATE INDEX SKEWED_VALUES_N50 ON SKEWED_VALUES (SD_ID_OID); + +CREATE INDEX SKEWED_VALUES_N49 ON SKEWED_VALUES (STRING_LIST_ID_EID); + + +-- Constraints for table SD_PARAMS +ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID); + + +-- Constraints for table FUNC_RU +ALTER TABLE 
FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) ; + +CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID); + + +-- Constraints for table TYPE_FIELDS +ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) ; + +CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME); + + +-- Constraints for table BUCKETING_COLS +ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID); + + +-- Constraints for table DATABASE_PARAMS +ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ; + +CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID); + + +-- Constraints for table INDEX_PARAMS +ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) ; + +CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID); + + +-- Constraints for table COLUMNS_V2 +ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ; + +CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID); + + +-- Constraints for table SERDE_PARAMS +ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ; + +CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID); + + +-- Constraints for table PARTITION_PARAMS +ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ; + +CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID); + + +-- Constraints for table TABLE_PARAMS +ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID); + +-- Constraints for resource plan tables. + +CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME"); + +CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH); + +ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME"); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE DBS ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES CTLGS ("NAME"); +-- ----------------------------------------------------------------------------------------------------------------------------------------------- +-- Transaction and Lock Tables +-- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file. 
+-- ----------------------------------------------------------------------------------------------------------------------------------------------- +CREATE TABLE COMPACTION_QUEUE( + CQ_ID bigint NOT NULL, + CQ_DATABASE nvarchar(128) NOT NULL, + CQ_TABLE nvarchar(128) NOT NULL, + CQ_PARTITION nvarchar(767) NULL, + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_TBLPROPERTIES nvarchar(2048) NULL, + CQ_WORKER_ID nvarchar(128) NULL, + CQ_START bigint NULL, + CQ_RUN_AS nvarchar(128) NULL, + CQ_HIGHEST_WRITE_ID bigint NULL, + CQ_META_INFO varbinary(2048) NULL, + CQ_HADOOP_JOB_ID nvarchar(128) NULL, +PRIMARY KEY CLUSTERED +( + CQ_ID ASC +) +); + +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID bigint NOT NULL, + CC_DATABASE nvarchar(128) NOT NULL, + CC_TABLE nvarchar(128) NOT NULL, + CC_PARTITION nvarchar(767) NULL, + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_TBLPROPERTIES nvarchar(2048) NULL, + CC_WORKER_ID nvarchar(128) NULL, + CC_START bigint NULL, + CC_END bigint NULL, + CC_RUN_AS nvarchar(128) NULL, + CC_HIGHEST_WRITE_ID bigint NULL, + CC_META_INFO varbinary(2048) NULL, + CC_HADOOP_JOB_ID nvarchar(128) NULL, +PRIMARY KEY CLUSTERED +( + CC_ID ASC +) +); + +CREATE TABLE COMPLETED_TXN_COMPONENTS( + CTC_TXNID bigint NOT NULL, + CTC_DATABASE nvarchar(128) NOT NULL, + CTC_TABLE nvarchar(128) NULL, + CTC_PARTITION nvarchar(767) NULL, + CTC_TIMESTAMP datetime2 DEFAULT CURRENT_TIMESTAMP NOT NULL, + CTC_WRITEID bigint +); + +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +CREATE TABLE HIVE_LOCKS( + HL_LOCK_EXT_ID bigint NOT NULL, + HL_LOCK_INT_ID bigint NOT NULL, + HL_TXNID bigint NOT NULL, + HL_DB nvarchar(128) NOT NULL, + HL_TABLE nvarchar(128) NULL, + HL_PARTITION nvarchar(767) NULL, + HL_LOCK_STATE char(1) NOT NULL, + HL_LOCK_TYPE char(1) NOT NULL, + HL_LAST_HEARTBEAT bigint NOT NULL, + HL_ACQUIRED_AT bigint NULL, + HL_USER nvarchar(128) NOT NULL, + HL_HOST nvarchar(128) NOT NULL, + HL_HEARTBEAT_COUNT int NULL, + HL_AGENT_INFO nvarchar(128) NULL, + HL_BLOCKEDBY_EXT_ID bigint NULL, + HL_BLOCKEDBY_INT_ID bigint NULL, +PRIMARY KEY CLUSTERED +( + HL_LOCK_EXT_ID ASC, + HL_LOCK_INT_ID ASC +) +); + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID( + NCQ_NEXT bigint NOT NULL +); + +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + +CREATE TABLE NEXT_LOCK_ID( + NL_NEXT bigint NOT NULL +); + +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE NEXT_TXN_ID( + NTXN_NEXT bigint NOT NULL +); + +INSERT INTO NEXT_TXN_ID VALUES(1); + +CREATE TABLE TXNS( + TXN_ID bigint NOT NULL, + TXN_STATE char(1) NOT NULL, + TXN_STARTED bigint NOT NULL, + TXN_LAST_HEARTBEAT bigint NOT NULL, + TXN_USER nvarchar(128) NOT NULL, + TXN_HOST nvarchar(128) NOT NULL, + TXN_AGENT_INFO nvarchar(128) NULL, + TXN_META_INFO nvarchar(128) NULL, + TXN_HEARTBEAT_COUNT int NULL, + TXN_TYPE int NULL, +PRIMARY KEY CLUSTERED +( + TXN_ID ASC +) +); + +CREATE TABLE TXN_COMPONENTS( + TC_TXNID bigint NOT NULL, + TC_DATABASE nvarchar(128) NOT NULL, + TC_TABLE nvarchar(128) NULL, + TC_PARTITION nvarchar(767) NULL, + TC_OPERATION_TYPE char(1) NOT NULL, + TC_WRITEID bigint +); + +ALTER TABLE TXN_COMPONENTS WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID); + +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); + +CREATE TABLE AUX_TABLE ( + MT_KEY1 nvarchar(128) NOT NULL, + MT_KEY2 bigint NOT NULL, + MT_COMMENT nvarchar(255) NULL, + PRIMARY KEY CLUSTERED +( + MT_KEY1 ASC, + MT_KEY2 ASC +) +); + 
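The one-row NEXT_COMPACTION_QUEUE_ID, NEXT_LOCK_ID, and NEXT_TXN_ID tables above are seeded to 1 and serve as portable sequences: the metastore reads the current value under a row lock, bumps it, and hands the old value out, which also lets it allocate a contiguous block of ids in a single statement. A minimal T-SQL sketch of opening one transaction this way (values are illustrative; TxnHandler does the equivalent over JDBC, in batches and with retries):

DECLARE @id bigint;
SELECT @id = NTXN_NEXT FROM NEXT_TXN_ID WITH (UPDLOCK);
UPDATE NEXT_TXN_ID SET NTXN_NEXT = @id + 1;
-- 'o' is TxnHandler's marker for an open transaction; times are epoch millis.
INSERT INTO TXNS (TXN_ID, TXN_STATE, TXN_STARTED, TXN_LAST_HEARTBEAT, TXN_USER, TXN_HOST)
VALUES (@id, 'o', 1527000000000, 1527000000000, 'hive', 'worker-1');

+CREATE TABLE KEY_CONSTRAINTS +( + CHILD_CD_ID BIGINT, + 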
CHILD_INTEGER_IDX INT, + CHILD_TBL_ID BIGINT, + PARENT_CD_ID BIGINT, + PARENT_INTEGER_IDX INT NOT NULL, + PARENT_TBL_ID BIGINT NOT NULL, + POSITION INT NOT NULL, + CONSTRAINT_NAME VARCHAR(400) NOT NULL, + CONSTRAINT_TYPE SMALLINT NOT NULL, + UPDATE_RULE SMALLINT, + DELETE_RULE SMALLINT, + ENABLE_VALIDATE_RELY SMALLINT NOT NULL, + DEFAULT_VALUE VARCHAR(400) +) ; + +ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION); + +CREATE INDEX CONSTRAINTS_PARENT_TBL_ID__INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID); + +CREATE INDEX CONSTRAINTS_CONSTRAINT_TYPE_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE); + +CREATE TABLE WRITE_SET ( + WS_DATABASE nvarchar(128) NOT NULL, + WS_TABLE nvarchar(128) NOT NULL, + WS_PARTITION nvarchar(767), + WS_TXNID bigint NOT NULL, + WS_COMMIT_ID bigint NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +); + +CREATE TABLE METASTORE_DB_PROPERTIES ( + PROPERTY_KEY VARCHAR(255) NOT NULL, + PROPERTY_VALUE VARCHAR(1000) NOT NULL, + DESCRIPTION VARCHAR(1000) +); + +ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY); + +CREATE TABLE TXN_TO_WRITE_ID ( + T2W_TXNID bigint NOT NULL, + T2W_DATABASE nvarchar(128) NOT NULL, + T2W_TABLE nvarchar(256) NOT NULL, + T2W_WRITEID bigint NOT NULL +); + +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); + +CREATE TABLE NEXT_WRITE_ID ( + NWI_DATABASE nvarchar(128) NOT NULL, + NWI_TABLE nvarchar(256) NOT NULL, + NWI_NEXT bigint NOT NULL +); + +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); + +CREATE TABLE MIN_HISTORY_LEVEL ( + MHL_TXNID bigint NOT NULL, + MHL_MIN_OPEN_TXNID bigint NOT NULL, +PRIMARY KEY CLUSTERED +( + MHL_TXNID ASC +) +); + +CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID); + +CREATE TABLE "I_SCHEMA" ( + "SCHEMA_ID" bigint primary key, + "SCHEMA_TYPE" int not null, + "NAME" nvarchar(256) unique, + "DB_ID" bigint references "DBS" ("DB_ID"), + "COMPATIBILITY" int not null, + "VALIDATION_LEVEL" int not null, + "CAN_EVOLVE" bit not null, + "SCHEMA_GROUP" nvarchar(256), + "DESCRIPTION" nvarchar(4000) +); + +CREATE TABLE "SCHEMA_VERSION" ( + "SCHEMA_VERSION_ID" bigint primary key, + "SCHEMA_ID" bigint references "I_SCHEMA" ("SCHEMA_ID"), + "VERSION" int not null, + "CREATED_AT" bigint not null, + "CD_ID" bigint references "CDS" ("CD_ID"), + "STATE" int not null, + "DESCRIPTION" nvarchar(4000), + "SCHEMA_TEXT" varchar(max), + "FINGERPRINT" nvarchar(256), + "SCHEMA_VERSION_NAME" nvarchar(256), + "SERDE_ID" bigint references "SERDES" ("SERDE_ID"), + unique ("SCHEMA_ID", "VERSION") +); + +CREATE TABLE REPL_TXN_MAP ( + RTM_REPL_POLICY nvarchar(256) NOT NULL, + RTM_SRC_TXN_ID bigint NOT NULL, + RTM_TARGET_TXN_ID bigint NOT NULL +); + +ALTER TABLE REPL_TXN_MAP ADD CONSTRAINT REPL_TXN_MAP_PK PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID); + 
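TXN_TO_WRITE_ID and NEXT_WRITE_ID above implement per-table write ids for ACID: each (database, table) pair carries its own monotonic counter in NEXT_WRITE_ID, and TXN_TO_WRITE_ID records which write id each transaction drew for that table, which is exactly what the two unique indexes enforce. A T-SQL sketch of one allocation, for a hypothetical open transaction 42 writing to sales.orders (roughly what TxnHandler's write-id allocation does, in bulk):

-- The first writer to a table seeds the counter; later writers bump it.
DECLARE @w bigint;
SELECT @w = NWI_NEXT FROM NEXT_WRITE_ID WITH (UPDLOCK) WHERE NWI_DATABASE = 'sales' AND NWI_TABLE = 'orders';
IF @w IS NULL
BEGIN
  SET @w = 1;
  INSERT INTO NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE, NWI_NEXT) VALUES ('sales', 'orders', 2);
END
ELSE
  UPDATE NEXT_WRITE_ID SET NWI_NEXT = @w + 1 WHERE NWI_DATABASE = 'sales' AND NWI_TABLE = 'orders';
INSERT INTO TXN_TO_WRITE_ID (T2W_TXNID, T2W_DATABASE, T2W_TABLE, T2W_WRITEID) VALUES (42, 'sales', 'orders', @w);

+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus. +-- NOTE: Some versions of SchemaTool do not automatically generate this table. 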
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416 +CREATE TABLE SEQUENCE_TABLE +( + SEQUENCE_NAME nvarchar(256) NOT NULL, + NEXT_VAL bigint NOT NULL +); + +CREATE UNIQUE INDEX PART_TABLE_PK ON SEQUENCE_TABLE (SEQUENCE_NAME); + +INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1); + +CREATE TABLE RUNTIME_STATS ( + RS_ID bigint primary key, + CREATE_TIME bigint NOT NULL, + WEIGHT bigint NOT NULL, + PAYLOAD varbinary(max) +); + +CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME); + +-- ----------------------------------------------------------------- +-- Record schema version. Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0'); diff --git standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql new file mode 100644 index 0000000000..3faa84bb93 --- /dev/null +++ standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql @@ -0,0 +1,1184 @@ +-- MySQL dump 10.13 Distrib 5.5.25, for osx10.6 (i386) +-- +-- Host: localhost Database: test +-- ------------------------------------------------------ +-- Server version 5.5.25 + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; + +-- +-- Table structure for table `BUCKETING_COLS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` ( + `SD_ID` bigint(20) NOT NULL, + `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`SD_ID`,`INTEGER_IDX`), + KEY `BUCKETING_COLS_N49` (`SD_ID`), + CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `CDS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `CDS` ( + `CD_ID` bigint(20) NOT NULL, + PRIMARY KEY (`CD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + 
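CDS, just above, is nothing but a surrogate key: the actual column list lives in COLUMNS_V2, defined next, keyed by (CD_ID, COLUMN_NAME) with INTEGER_IDX preserving declaration order. Storage descriptors reach it through SDS.CD_ID, so a table and all of its partitions with an unchanged schema can share one column descriptor instead of duplicating the column list per partition. A sketch of reading a table's schema back out of these tables (the table name is hypothetical):

SELECT C.`COLUMN_NAME`, C.`TYPE_NAME`
FROM `TBLS` T
JOIN `SDS` S ON S.`SD_ID` = T.`SD_ID`
JOIN `COLUMNS_V2` C ON C.`CD_ID` = S.`CD_ID`
WHERE T.`TBL_NAME` = 'orders'
ORDER BY C.`INTEGER_IDX`;

+-- +-- Table structure for table `COLUMNS_V2` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `COLUMNS_V2` ( + `CD_ID` bigint(20) NOT NULL, + `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TYPE_NAME` MEDIUMTEXT DEFAULT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`CD_ID`,`COLUMN_NAME`), + KEY `COLUMNS_V2_N49` (`CD_ID`), + CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY 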
(`CD_ID`) REFERENCES `CDS` (`CD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `DATABASE_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` ( + `DB_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`DB_ID`,`PARAM_KEY`), + KEY `DATABASE_PARAMS_N49` (`DB_ID`), + CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +CREATE TABLE `CTLGS` ( + `CTLG_ID` BIGINT PRIMARY KEY, + `NAME` VARCHAR(256), + `DESC` VARCHAR(4000), + `LOCATION_URI` VARCHAR(4000) NOT NULL, + UNIQUE KEY `UNIQUE_CATALOG` (`NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + + +-- +-- Table structure for table `DBS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `DBS` ( + `DB_ID` bigint(20) NOT NULL, + `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `CTLG_NAME` varchar(256) NOT NULL, + PRIMARY KEY (`DB_ID`), + UNIQUE KEY `UNIQUE_DATABASE` (`NAME`, `CTLG_NAME`), + CONSTRAINT `CTLG_FK1` FOREIGN KEY (`CTLG_NAME`) REFERENCES `CTLGS` (`NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `DB_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `DB_PRIVS` ( + `DB_GRANT_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `DB_ID` bigint(20) DEFAULT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`DB_GRANT_ID`), + UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + KEY `DB_PRIVS_N49` (`DB_ID`), + CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `GLOBAL_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` ( + `USER_GRANT_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` 
varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`USER_GRANT_ID`), + UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `IDXS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `IDXS` ( + `INDEX_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `DEFERRED_REBUILD` bit(1) NOT NULL, + `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INDEX_TBL_ID` bigint(20) DEFAULT NULL, + `LAST_ACCESS_TIME` int(11) NOT NULL, + `ORIG_TBL_ID` bigint(20) DEFAULT NULL, + `SD_ID` bigint(20) DEFAULT NULL, + PRIMARY KEY (`INDEX_ID`), + UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`), + KEY `IDXS_N51` (`SD_ID`), + KEY `IDXS_N50` (`INDEX_TBL_ID`), + KEY `IDXS_N49` (`ORIG_TBL_ID`), + CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`), + CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`), + CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `INDEX_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` ( + `INDEX_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`), + KEY `INDEX_PARAMS_N49` (`INDEX_ID`), + CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `NUCLEUS_TABLES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` ( + `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`CLASS_NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PARTITIONS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PARTITIONS` ( + `PART_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `LAST_ACCESS_TIME` int(11) NOT NULL, + `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `SD_ID` bigint(20) DEFAULT NULL, + `TBL_ID` bigint(20) DEFAULT NULL, + 
PRIMARY KEY (`PART_ID`), + UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`), + KEY `PARTITIONS_N49` (`TBL_ID`), + KEY `PARTITIONS_N50` (`SD_ID`), + CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`), + CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PARTITION_EVENTS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` ( + `PART_NAME_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `EVENT_TIME` bigint(20) NOT NULL, + `EVENT_TYPE` int(11) NOT NULL, + `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`PART_NAME_ID`), + KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PARTITION_KEYS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` ( + `TBL_ID` bigint(20) NOT NULL, + `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`TBL_ID`,`PKEY_NAME`), + KEY `PARTITION_KEYS_N49` (`TBL_ID`), + CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PARTITION_KEY_VALS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` ( + `PART_ID` bigint(20) NOT NULL, + `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`PART_ID`,`INTEGER_IDX`), + KEY `PARTITION_KEY_VALS_N49` (`PART_ID`), + CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PARTITION_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` ( + `PART_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`PART_ID`,`PARAM_KEY`), + KEY `PARTITION_PARAMS_N49` (`PART_ID`), + CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PART_COL_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS 
`PART_COL_PRIVS` ( + `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL, + `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `CREATE_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PART_ID` bigint(20) DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`PART_COLUMN_GRANT_ID`), + KEY `PART_COL_PRIVS_N49` (`PART_ID`), + KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PART_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PART_PRIVS` ( + `PART_GRANT_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PART_ID` bigint(20) DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`PART_GRANT_ID`), + KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + KEY `PART_PRIVS_N49` (`PART_ID`), + CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `ROLES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `ROLES` ( + `ROLE_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`ROLE_ID`), + UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `ROLE_MAP` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `ROLE_MAP` ( + `ROLE_GRANT_ID` bigint(20) NOT NULL, + `ADD_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `ROLE_ID` bigint(20) DEFAULT NULL, + PRIMARY KEY (`ROLE_GRANT_ID`), + UNIQUE KEY 
`USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`), + KEY `ROLE_MAP_N49` (`ROLE_ID`), + CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SDS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SDS` ( + `SD_ID` bigint(20) NOT NULL, + `CD_ID` bigint(20) DEFAULT NULL, + `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `IS_COMPRESSED` bit(1) NOT NULL, + `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `NUM_BUCKETS` int(11) NOT NULL, + `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `SERDE_ID` bigint(20) DEFAULT NULL, + PRIMARY KEY (`SD_ID`), + KEY `SDS_N49` (`SERDE_ID`), + KEY `SDS_N50` (`CD_ID`), + CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`), + CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SD_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SD_PARAMS` ( + `SD_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`SD_ID`,`PARAM_KEY`), + KEY `SD_PARAMS_N49` (`SD_ID`), + CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SEQUENCE_TABLE` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` ( + `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `NEXT_VAL` bigint(20) NOT NULL, + PRIMARY KEY (`SEQUENCE_NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +INSERT INTO `SEQUENCE_TABLE` (`SEQUENCE_NAME`, `NEXT_VAL`) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1); + +-- +-- Table structure for table `SERDES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SERDES` ( + `SERDE_ID` bigint(20) NOT NULL, + `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `DESCRIPTION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `SERIALIZER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `DESERIALIZER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `SERDE_TYPE` integer, + PRIMARY KEY (`SERDE_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SERDE_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` ( + `SERDE_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(256) CHARACTER SET 
latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`), + KEY `SERDE_PARAMS_N49` (`SERDE_ID`), + CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SKEWED_COL_NAMES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` ( + `SD_ID` bigint(20) NOT NULL, + `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`SD_ID`,`INTEGER_IDX`), + KEY `SKEWED_COL_NAMES_N49` (`SD_ID`), + CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` ( + `SD_ID` bigint(20) NOT NULL, + `STRING_LIST_ID_KID` bigint(20) NOT NULL, + `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`), + KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`), + KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`), + CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`), + CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SKEWED_STRING_LIST` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` ( + `STRING_LIST_ID` bigint(20) NOT NULL, + PRIMARY KEY (`STRING_LIST_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SKEWED_STRING_LIST_VALUES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` ( + `STRING_LIST_ID` bigint(20) NOT NULL, + `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`), + KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`), + CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SKEWED_VALUES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` ( + `SD_ID_OID` bigint(20) NOT NULL, + `STRING_LIST_ID_EID` bigint(20) NOT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`), + KEY `SKEWED_VALUES_N50` (`SD_ID_OID`), + KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`), + CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES 
`SKEWED_STRING_LIST` (`STRING_LIST_ID`), + CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SORT_COLS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SORT_COLS` ( + `SD_ID` bigint(20) NOT NULL, + `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `ORDER` int(11) NOT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`SD_ID`,`INTEGER_IDX`), + KEY `SORT_COLS_N49` (`SD_ID`), + CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TABLE_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` ( + `TBL_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`TBL_ID`,`PARAM_KEY`), + KEY `TABLE_PARAMS_N49` (`TBL_ID`), + CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `MV_CREATION_METADATA` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `MV_CREATION_METADATA` ( + `MV_CREATION_METADATA_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TXN_LIST` TEXT DEFAULT NULL, + PRIMARY KEY (`MV_CREATION_METADATA_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +CREATE INDEX MV_UNIQUE_TABLE ON MV_CREATION_METADATA (TBL_NAME, DB_NAME) USING BTREE; + +-- +-- Table structure for table `TBLS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TBLS` ( + `TBL_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `DB_ID` bigint(20) DEFAULT NULL, + `LAST_ACCESS_TIME` int(11) NOT NULL, + `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `RETENTION` int(11) NOT NULL, + `SD_ID` bigint(20) DEFAULT NULL, + `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `VIEW_EXPANDED_TEXT` mediumtext, + `VIEW_ORIGINAL_TEXT` mediumtext, + `IS_REWRITE_ENABLED` bit(1) NOT NULL DEFAULT 0, + PRIMARY KEY (`TBL_ID`), + UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`), + KEY `TBLS_N50` (`SD_ID`), + KEY `TBLS_N49` (`DB_ID`), + CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`), + CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `MV_TABLES_USED` +-- + +/*!40101 
SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `MV_TABLES_USED` ( + `MV_CREATION_METADATA_ID` bigint(20) NOT NULL, + `TBL_ID` bigint(20) NOT NULL, + CONSTRAINT `MV_TABLES_USED_FK1` FOREIGN KEY (`MV_CREATION_METADATA_ID`) REFERENCES `MV_CREATION_METADATA` (`MV_CREATION_METADATA_ID`), + CONSTRAINT `MV_TABLES_USED_FK2` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TBL_COL_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` ( + `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL, + `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `CREATE_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_ID` bigint(20) DEFAULT NULL, + PRIMARY KEY (`TBL_COLUMN_GRANT_ID`), + KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + KEY `TBL_COL_PRIVS_N49` (`TBL_ID`), + CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TBL_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TBL_PRIVS` ( + `TBL_GRANT_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_ID` bigint(20) DEFAULT NULL, + PRIMARY KEY (`TBL_GRANT_ID`), + KEY `TBL_PRIVS_N49` (`TBL_ID`), + KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TAB_COL_STATS` +-- +CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` ( + `CS_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TBL_ID` bigint(20) NOT NULL, + `LONG_LOW_VALUE` bigint(20), + `LONG_HIGH_VALUE` bigint(20), + 
`DOUBLE_HIGH_VALUE` double(53,4), + `DOUBLE_LOW_VALUE` double(53,4), + `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `NUM_NULLS` bigint(20) NOT NULL, + `NUM_DISTINCTS` bigint(20), + `BIT_VECTOR` blob, + `AVG_COL_LEN` double(53,4), + `MAX_COL_LEN` bigint(20), + `NUM_TRUES` bigint(20), + `NUM_FALSES` bigint(20), + `LAST_ANALYZED` bigint(20) NOT NULL, + PRIMARY KEY (`CS_ID`), + CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- +-- Table structure for table `PART_COL_STATS` +-- +CREATE TABLE IF NOT EXISTS `PART_COL_STATS` ( + `CS_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PART_ID` bigint(20) NOT NULL, + `LONG_LOW_VALUE` bigint(20), + `LONG_HIGH_VALUE` bigint(20), + `DOUBLE_HIGH_VALUE` double(53,4), + `DOUBLE_LOW_VALUE` double(53,4), + `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `NUM_NULLS` bigint(20) NOT NULL, + `NUM_DISTINCTS` bigint(20), + `BIT_VECTOR` blob, + `AVG_COL_LEN` double(53,4), + `MAX_COL_LEN` bigint(20), + `NUM_TRUES` bigint(20), + `NUM_FALSES` bigint(20), + `LAST_ANALYZED` bigint(20) NOT NULL, + PRIMARY KEY (`CS_ID`), + CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE; + +-- +-- Table structure for table `TYPES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TYPES` ( + `TYPES_ID` bigint(20) NOT NULL, + `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`TYPES_ID`), + UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TYPE_FIELDS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` ( + `TYPE_NAME` bigint(20) NOT NULL, + `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`), + KEY `TYPE_FIELDS_N49` (`TYPE_NAME`), + CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey] +CREATE TABLE IF NOT EXISTS `MASTER_KEYS` +( + 
`KEY_ID` INTEGER NOT NULL AUTO_INCREMENT, + `MASTER_KEY` VARCHAR(767) BINARY NULL, + PRIMARY KEY (`KEY_ID`) +) ENGINE=INNODB DEFAULT CHARSET=latin1; + +-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken] +CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS` +( + `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL, + `TOKEN` VARCHAR(767) BINARY NULL, + PRIMARY KEY (`TOKEN_IDENT`) +) ENGINE=INNODB DEFAULT CHARSET=latin1; + +-- +-- Table structure for VERSION +-- +CREATE TABLE IF NOT EXISTS `VERSION` ( + `VER_ID` BIGINT NOT NULL, + `SCHEMA_VERSION` VARCHAR(127) NOT NULL, + `VERSION_COMMENT` VARCHAR(255), + PRIMARY KEY (`VER_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- +-- Table structure for table FUNCS +-- +CREATE TABLE IF NOT EXISTS `FUNCS` ( + `FUNC_ID` BIGINT(20) NOT NULL, + `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `CREATE_TIME` INT(11) NOT NULL, + `DB_ID` BIGINT(20), + `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin, + `FUNC_TYPE` INT(11) NOT NULL, + `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin, + `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin, + PRIMARY KEY (`FUNC_ID`), + UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`), + KEY `FUNCS_N49` (`DB_ID`), + CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- +-- Table structure for table FUNC_RU +-- +CREATE TABLE IF NOT EXISTS `FUNC_RU` ( + `FUNC_ID` BIGINT(20) NOT NULL, + `RESOURCE_TYPE` INT(11) NOT NULL, + `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `INTEGER_IDX` INT(11) NOT NULL, + PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`), + CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE IF NOT EXISTS `NOTIFICATION_LOG` +( + `NL_ID` BIGINT(20) NOT NULL, + `EVENT_ID` BIGINT(20) NOT NULL, + `EVENT_TIME` INT(11) NOT NULL, + `EVENT_TYPE` varchar(32) NOT NULL, + `CAT_NAME` varchar(256), + `DB_NAME` varchar(128), + `TBL_NAME` varchar(256), + `MESSAGE` longtext, + `MESSAGE_FORMAT` varchar(16), + PRIMARY KEY (`NL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE IF NOT EXISTS `NOTIFICATION_SEQUENCE` +( + `NNI_ID` BIGINT(20) NOT NULL, + `NEXT_EVENT_ID` BIGINT(20) NOT NULL, + PRIMARY KEY (`NNI_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +INSERT INTO `NOTIFICATION_SEQUENCE` (`NNI_ID`, `NEXT_EVENT_ID`) SELECT * from (select 1 as `NNI_ID`, 1 as `NEXT_EVENT_ID`) a WHERE (SELECT COUNT(*) FROM `NOTIFICATION_SEQUENCE`) = 0; + +CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS` +( + `CHILD_CD_ID` BIGINT, + `CHILD_INTEGER_IDX` INT(11), + `CHILD_TBL_ID` BIGINT, + `PARENT_CD_ID` BIGINT, + `PARENT_INTEGER_IDX` INT(11) NOT NULL, + `PARENT_TBL_ID` BIGINT NOT NULL, + `POSITION` BIGINT NOT NULL, + `CONSTRAINT_NAME` VARCHAR(400) NOT NULL, + `CONSTRAINT_TYPE` SMALLINT(6) NOT NULL, + `UPDATE_RULE` SMALLINT(6), + `DELETE_RULE` SMALLINT(6), + `ENABLE_VALIDATE_RELY` SMALLINT(6) NOT NULL, + `DEFAULT_VALUE` VARCHAR(400), + PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX `CONSTRAINTS_PARENT_TABLE_ID_INDEX` ON KEY_CONSTRAINTS (`PARENT_TBL_ID`) USING BTREE; + +CREATE INDEX `CONSTRAINTS_CONSTRAINT_TYPE_INDEX` ON KEY_CONSTRAINTS (`CONSTRAINT_TYPE`) USING BTREE; + +-- ----------------------------- +-- Metastore DB Properties table +-- ----------------------------- +CREATE TABLE IF NOT EXISTS `METASTORE_DB_PROPERTIES` ( + 
`PROPERTY_KEY` varchar(255) NOT NULL, + `PROPERTY_VALUE` varchar(1000) NOT NULL, + `DESCRIPTION` varchar(1000), + PRIMARY KEY(`PROPERTY_KEY`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + + +-- --------------------- +-- Resource plan tables. +-- --------------------- +CREATE TABLE IF NOT EXISTS WM_RESOURCEPLAN ( + `RP_ID` bigint(20) NOT NULL, + `NAME` varchar(128) NOT NULL, + `QUERY_PARALLELISM` int(11), + `STATUS` varchar(20) NOT NULL, + `DEFAULT_POOL_ID` bigint(20), + PRIMARY KEY (`RP_ID`), + UNIQUE KEY `UNIQUE_WM_RESOURCEPLAN` (`NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE IF NOT EXISTS WM_POOL +( + `POOL_ID` bigint(20) NOT NULL, + `RP_ID` bigint(20) NOT NULL, + `PATH` varchar(767) NOT NULL, + `ALLOC_FRACTION` DOUBLE, + `QUERY_PARALLELISM` int(11), + `SCHEDULING_POLICY` varchar(767), + PRIMARY KEY (`POOL_ID`), + UNIQUE KEY `UNIQUE_WM_POOL` (`RP_ID`, `PATH`), + CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +ALTER TABLE `WM_RESOURCEPLAN` ADD CONSTRAINT `WM_RESOURCEPLAN_FK1` FOREIGN KEY (`DEFAULT_POOL_ID`) REFERENCES `WM_POOL`(`POOL_ID`); + +CREATE TABLE IF NOT EXISTS WM_TRIGGER +( + `TRIGGER_ID` bigint(20) NOT NULL, + `RP_ID` bigint(20) NOT NULL, + `NAME` varchar(128) NOT NULL, + `TRIGGER_EXPRESSION` varchar(1024), + `ACTION_EXPRESSION` varchar(1024), + `IS_IN_UNMANAGED` bit(1) NOT NULL DEFAULT 0, + PRIMARY KEY (`TRIGGER_ID`), + UNIQUE KEY `UNIQUE_WM_TRIGGER` (`RP_ID`, `NAME`), + CONSTRAINT `WM_TRIGGER_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE IF NOT EXISTS WM_POOL_TO_TRIGGER +( + `POOL_ID` bigint(20) NOT NULL, + `TRIGGER_ID` bigint(20) NOT NULL, + PRIMARY KEY (`POOL_ID`, `TRIGGER_ID`), + CONSTRAINT `WM_POOL_TO_TRIGGER_FK1` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`), + CONSTRAINT `WM_POOL_TO_TRIGGER_FK2` FOREIGN KEY (`TRIGGER_ID`) REFERENCES `WM_TRIGGER` (`TRIGGER_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE IF NOT EXISTS WM_MAPPING +( + `MAPPING_ID` bigint(20) NOT NULL, + `RP_ID` bigint(20) NOT NULL, + `ENTITY_TYPE` varchar(128) NOT NULL, + `ENTITY_NAME` varchar(128) NOT NULL, + `POOL_ID` bigint(20), + `ORDERING` int, + PRIMARY KEY (`MAPPING_ID`), + UNIQUE KEY `UNIQUE_WM_MAPPING` (`RP_ID`, `ENTITY_TYPE`, `ENTITY_NAME`), + CONSTRAINT `WM_MAPPING_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`), + CONSTRAINT `WM_MAPPING_FK2` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- ---------------------------- +-- Transaction and Lock Tables +-- ---------------------------- +CREATE TABLE TXNS ( + TXN_ID bigint PRIMARY KEY, + TXN_STATE char(1) NOT NULL, + TXN_STARTED bigint NOT NULL, + TXN_LAST_HEARTBEAT bigint NOT NULL, + TXN_USER varchar(128) NOT NULL, + TXN_HOST varchar(128) NOT NULL, + TXN_AGENT_INFO varchar(128), + TXN_META_INFO varchar(128), + TXN_HEARTBEAT_COUNT int, + TXN_TYPE int +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE TXN_COMPONENTS ( + TC_TXNID bigint NOT NULL, + TC_DATABASE varchar(128) NOT NULL, + TC_TABLE varchar(128), + TC_PARTITION varchar(767), + TC_OPERATION_TYPE char(1) NOT NULL, + TC_WRITEID bigint, + FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); + +CREATE TABLE COMPLETED_TXN_COMPONENTS ( + CTC_TXNID bigint NOT NULL, + CTC_DATABASE varchar(128) NOT NULL, + CTC_TABLE varchar(256), + 
CTC_PARTITION varchar(767), + CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL, + CTC_WRITEID bigint +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE; + +CREATE TABLE NEXT_TXN_ID ( + NTXN_NEXT bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO NEXT_TXN_ID VALUES(1); + +CREATE TABLE HIVE_LOCKS ( + HL_LOCK_EXT_ID bigint NOT NULL, + HL_LOCK_INT_ID bigint NOT NULL, + HL_TXNID bigint NOT NULL, + HL_DB varchar(128) NOT NULL, + HL_TABLE varchar(128), + HL_PARTITION varchar(767), + HL_LOCK_STATE char(1) not null, + HL_LOCK_TYPE char(1) not null, + HL_LAST_HEARTBEAT bigint NOT NULL, + HL_ACQUIRED_AT bigint, + HL_USER varchar(128) NOT NULL, + HL_HOST varchar(128) NOT NULL, + HL_HEARTBEAT_COUNT int, + HL_AGENT_INFO varchar(128), + HL_BLOCKEDBY_EXT_ID bigint, + HL_BLOCKEDBY_INT_ID bigint, + PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID), + KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID); + +CREATE TABLE NEXT_LOCK_ID ( + NL_NEXT bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE COMPACTION_QUEUE ( + CQ_ID bigint PRIMARY KEY, + CQ_DATABASE varchar(128) NOT NULL, + CQ_TABLE varchar(128) NOT NULL, + CQ_PARTITION varchar(767), + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_TBLPROPERTIES varchar(2048), + CQ_WORKER_ID varchar(128), + CQ_START bigint, + CQ_RUN_AS varchar(128), + CQ_HIGHEST_WRITE_ID bigint, + CQ_META_INFO varbinary(2048), + CQ_HADOOP_JOB_ID varchar(32) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID bigint PRIMARY KEY, + CC_DATABASE varchar(128) NOT NULL, + CC_TABLE varchar(128) NOT NULL, + CC_PARTITION varchar(767), + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_TBLPROPERTIES varchar(2048), + CC_WORKER_ID varchar(128), + CC_START bigint, + CC_END bigint, + CC_RUN_AS varchar(128), + CC_HIGHEST_WRITE_ID bigint, + CC_META_INFO varbinary(2048), + CC_HADOOP_JOB_ID varchar(32) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( + NCQ_NEXT bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + +CREATE TABLE AUX_TABLE ( + MT_KEY1 varchar(128) NOT NULL, + MT_KEY2 bigint NOT NULL, + MT_COMMENT varchar(255), + PRIMARY KEY(MT_KEY1, MT_KEY2) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE WRITE_SET ( + WS_DATABASE varchar(128) NOT NULL, + WS_TABLE varchar(128) NOT NULL, + WS_PARTITION varchar(767), + WS_TXNID bigint NOT NULL, + WS_COMMIT_ID bigint NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE TXN_TO_WRITE_ID ( + T2W_TXNID bigint NOT NULL, + T2W_DATABASE varchar(128) NOT NULL, + T2W_TABLE varchar(256) NOT NULL, + T2W_WRITEID bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); + +CREATE TABLE NEXT_WRITE_ID ( + NWI_DATABASE varchar(128) NOT NULL, + NWI_TABLE varchar(256) NOT NULL, + NWI_NEXT bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); + +CREATE TABLE MIN_HISTORY_LEVEL ( + MHL_TXNID bigint NOT NULL, + 
MHL_MIN_OPEN_TXNID bigint NOT NULL, + PRIMARY KEY(MHL_TXNID) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID); + +CREATE TABLE `I_SCHEMA` ( + `SCHEMA_ID` BIGINT PRIMARY KEY, + `SCHEMA_TYPE` INTEGER NOT NULL, + `NAME` VARCHAR(256), + `DB_ID` BIGINT, + `COMPATIBILITY` INTEGER NOT NULL, + `VALIDATION_LEVEL` INTEGER NOT NULL, + `CAN_EVOLVE` bit(1) NOT NULL, + `SCHEMA_GROUP` VARCHAR(256), + `DESCRIPTION` VARCHAR(4000), + FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`), + KEY `UNIQUE_NAME` (`NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE `SCHEMA_VERSION` ( + `SCHEMA_VERSION_ID` bigint primary key, + `SCHEMA_ID` BIGINT, + `VERSION` INTEGER NOT NULL, + `CREATED_AT` BIGINT NOT NULL, + `CD_ID` BIGINT, + `STATE` INTEGER NOT NULL, + `DESCRIPTION` VARCHAR(4000), + `SCHEMA_TEXT` mediumtext, + `FINGERPRINT` VARCHAR(256), + `SCHEMA_VERSION_NAME` VARCHAR(256), + `SERDE_ID` bigint, + FOREIGN KEY (`SCHEMA_ID`) REFERENCES `I_SCHEMA` (`SCHEMA_ID`), + FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`), + FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`), + KEY `UNIQUE_VERSION` (`SCHEMA_ID`, `VERSION`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE REPL_TXN_MAP ( + RTM_REPL_POLICY varchar(256) NOT NULL, + RTM_SRC_TXN_ID bigint NOT NULL, + RTM_TARGET_TXN_ID bigint NOT NULL, + PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + + +CREATE TABLE RUNTIME_STATS ( + RS_ID bigint primary key, + CREATE_TIME bigint NOT NULL, + WEIGHT bigint NOT NULL, + PAYLOAD blob +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME); + +-- ----------------------------------------------------------------- +-- Record schema version. Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0'); + +/*!40101 SET character_set_client = @saved_cs_client */; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +-- Dump completed on 2012-08-23 0:56:31 diff --git standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql new file mode 100644 index 0000000000..5de4d185b3 --- /dev/null +++ standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql @@ -0,0 +1,1141 @@ +-- Table SEQUENCE_TABLE is an internal table required by DataNucleus. +-- NOTE: Some versions of SchemaTool do not automatically generate this table. +-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416 +CREATE TABLE SEQUENCE_TABLE +( + SEQUENCE_NAME VARCHAR2(255) NOT NULL, + NEXT_VAL NUMBER NOT NULL +); + +ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME); + +INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1); + +-- Table NUCLEUS_TABLES is an internal table required by DataNucleus. 
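+-- It maps each persistable class name to the table, type, and version that back it.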
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable +-- NOTE: Some versions of SchemaTool do not automatically generate this table. +-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416 +CREATE TABLE NUCLEUS_TABLES +( + CLASS_NAME VARCHAR2(128) NOT NULL, + TABLE_NAME VARCHAR2(128) NOT NULL, + TYPE VARCHAR2(4) NOT NULL, + OWNER VARCHAR2(2) NOT NULL, + VERSION VARCHAR2(20) NOT NULL, + INTERFACE_NAME VARCHAR2(255) NULL +); + +ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME); + +-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege] +CREATE TABLE PART_COL_PRIVS +( + PART_COLUMN_GRANT_ID NUMBER NOT NULL, + "COLUMN_NAME" VARCHAR2(767) NULL, + CREATE_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PART_ID NUMBER NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + PART_COL_PRIV VARCHAR2(128) NULL +); + +ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID); + +-- Table CDS. +CREATE TABLE CDS +( + CD_ID NUMBER NOT NULL +); + +ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID); + +-- Table COLUMNS_V2 for join relationship +CREATE TABLE COLUMNS_V2 +( + CD_ID NUMBER NOT NULL, + "COMMENT" VARCHAR2(256) NULL, + "COLUMN_NAME" VARCHAR2(767) NOT NULL, + TYPE_NAME CLOB NOT NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME"); + +-- Table PARTITION_KEY_VALS for join relationship +CREATE TABLE PARTITION_KEY_VALS +( + PART_ID NUMBER NOT NULL, + PART_KEY_VAL VARCHAR2(256) NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX); + +CREATE TABLE CTLGS ( + CTLG_ID NUMBER PRIMARY KEY, + "NAME" VARCHAR2(256), + "DESC" VARCHAR2(4000), + LOCATION_URI VARCHAR2(4000) NOT NULL, + UNIQUE ("NAME") +); + +-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase] +CREATE TABLE DBS +( + DB_ID NUMBER NOT NULL, + "DESC" VARCHAR2(4000) NULL, + DB_LOCATION_URI VARCHAR2(4000) NOT NULL, + "NAME" VARCHAR2(128) NULL, + OWNER_NAME VARCHAR2(128) NULL, + OWNER_TYPE VARCHAR2(10) NULL, + CTLG_NAME VARCHAR2(256) +); + +ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID); + +-- Table PARTITION_PARAMS for join relationship +CREATE TABLE PARTITION_PARAMS +( + PART_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(256) NOT NULL, + PARAM_VALUE VARCHAR2(4000) NULL +); + +ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY); + +-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo] +CREATE TABLE SERDES +( + SERDE_ID NUMBER NOT NULL, + "NAME" VARCHAR2(128) NULL, + SLIB VARCHAR2(4000) NULL, + "DESCRIPTION" VARCHAR2(4000), + "SERIALIZER_CLASS" VARCHAR2(4000), + "DESERIALIZER_CLASS" VARCHAR2(4000), + "SERDE_TYPE" NUMBER +); + +ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID); + +-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType] +CREATE TABLE TYPES +( + TYPES_ID NUMBER NOT NULL, + TYPE_NAME VARCHAR2(128) NULL, + TYPE1 VARCHAR2(767) NULL, + TYPE2 VARCHAR2(767) NULL +); + +ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID); + +-- Table PARTITION_KEYS for join relationship +CREATE TABLE PARTITION_KEYS +( + TBL_ID NUMBER NOT NULL, + PKEY_COMMENT VARCHAR2(4000) NULL, + 
PKEY_NAME VARCHAR2(128) NOT NULL, + PKEY_TYPE VARCHAR2(767) NOT NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME); + +-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole] +CREATE TABLE ROLES +( + ROLE_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + OWNER_NAME VARCHAR2(128) NULL, + ROLE_NAME VARCHAR2(128) NULL +); + +ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID); + +-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition] +CREATE TABLE PARTITIONS +( + PART_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + LAST_ACCESS_TIME NUMBER (10) NOT NULL, + PART_NAME VARCHAR2(767) NULL, + SD_ID NUMBER NULL, + TBL_ID NUMBER NULL +); + +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID); + +-- Table INDEX_PARAMS for join relationship +CREATE TABLE INDEX_PARAMS +( + INDEX_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(256) NOT NULL, + PARAM_VALUE VARCHAR2(4000) NULL +); + +ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY); + +-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege] +CREATE TABLE TBL_COL_PRIVS +( + TBL_COLUMN_GRANT_ID NUMBER NOT NULL, + "COLUMN_NAME" VARCHAR2(767) NULL, + CREATE_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + TBL_COL_PRIV VARCHAR2(128) NULL, + TBL_ID NUMBER NULL +); + +ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID); + +-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex] +CREATE TABLE IDXS +( + INDEX_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)), + INDEX_HANDLER_CLASS VARCHAR2(4000) NULL, + INDEX_NAME VARCHAR2(128) NULL, + INDEX_TBL_ID NUMBER NULL, + LAST_ACCESS_TIME NUMBER (10) NOT NULL, + ORIG_TBL_ID NUMBER NULL, + SD_ID NUMBER NULL +); + +ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID); + +-- Table BUCKETING_COLS for join relationship +CREATE TABLE BUCKETING_COLS +( + SD_ID NUMBER NOT NULL, + BUCKET_COL_NAME VARCHAR2(256) NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +-- Table TYPE_FIELDS for join relationship +CREATE TABLE TYPE_FIELDS +( + TYPE_NAME NUMBER NOT NULL, + "COMMENT" VARCHAR2(256) NULL, + FIELD_NAME VARCHAR2(128) NOT NULL, + FIELD_TYPE VARCHAR2(767) NOT NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME); + +-- Table SD_PARAMS for join relationship +CREATE TABLE SD_PARAMS +( + SD_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(256) NOT NULL, + PARAM_VALUE CLOB NULL +); + +ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY); + +-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege] +CREATE TABLE GLOBAL_PRIVS +( + USER_GRANT_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + USER_PRIV VARCHAR2(128) NULL +); + +ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY 
KEY (USER_GRANT_ID); + +-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor] +CREATE TABLE SDS +( + SD_ID NUMBER NOT NULL, + CD_ID NUMBER NULL, + INPUT_FORMAT VARCHAR2(4000) NULL, + IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)), + LOCATION VARCHAR2(4000) NULL, + NUM_BUCKETS NUMBER (10) NOT NULL, + OUTPUT_FORMAT VARCHAR2(4000) NULL, + SERDE_ID NUMBER NULL +); + +ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID); + +-- Table TABLE_PARAMS for join relationship +CREATE TABLE TABLE_PARAMS +( + TBL_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(256) NOT NULL, + PARAM_VALUE CLOB NULL +); + +ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY); + +-- Table SORT_COLS for join relationship +CREATE TABLE SORT_COLS +( + SD_ID NUMBER NOT NULL, + "COLUMN_NAME" VARCHAR2(767) NULL, + "ORDER" NUMBER (10) NOT NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege] +CREATE TABLE TBL_PRIVS +( + TBL_GRANT_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + TBL_PRIV VARCHAR2(128) NULL, + TBL_ID NUMBER NULL +); + +ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID); + +-- Table DATABASE_PARAMS for join relationship +CREATE TABLE DATABASE_PARAMS +( + DB_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(180) NOT NULL, + PARAM_VALUE VARCHAR2(4000) NULL +); + +ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY); + +-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap] +CREATE TABLE ROLE_MAP +( + ROLE_GRANT_ID NUMBER NOT NULL, + ADD_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + ROLE_ID NUMBER NULL +); + +ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID); + +-- Table SERDE_PARAMS for join relationship +CREATE TABLE SERDE_PARAMS +( + SERDE_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(256) NOT NULL, + PARAM_VALUE CLOB NULL +); + +ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY); + +-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege] +CREATE TABLE PART_PRIVS +( + PART_GRANT_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PART_ID NUMBER NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + PART_PRIV VARCHAR2(128) NULL +); + +ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID); + +-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege] +CREATE TABLE DB_PRIVS +( + DB_GRANT_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + DB_ID NUMBER NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + DB_PRIV VARCHAR2(128) NULL +); + +ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID); + +-- Table TBLS for classes 
[org.apache.hadoop.hive.metastore.model.MTable] +CREATE TABLE TBLS +( + TBL_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + DB_ID NUMBER NULL, + LAST_ACCESS_TIME NUMBER (10) NOT NULL, + OWNER VARCHAR2(767) NULL, + OWNER_TYPE VARCHAR2(10) NULL, + RETENTION NUMBER (10) NOT NULL, + SD_ID NUMBER NULL, + TBL_NAME VARCHAR2(256) NULL, + TBL_TYPE VARCHAR2(128) NULL, + VIEW_EXPANDED_TEXT CLOB NULL, + VIEW_ORIGINAL_TEXT CLOB NULL, + IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)) +); + +ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); + +-- Table MV_CREATION_METADATA for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata] +CREATE TABLE MV_CREATION_METADATA +( + MV_CREATION_METADATA_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NOT NULL, + DB_NAME VARCHAR2(128) NOT NULL, + TBL_NAME VARCHAR2(256) NOT NULL, + TXN_LIST CLOB NULL +); + +ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID); + +CREATE UNIQUE INDEX UNIQUE_TABLE ON MV_CREATION_METADATA ("DB_NAME", "TBL_NAME"); + +-- Table MV_TABLES_USED for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata] +CREATE TABLE MV_TABLES_USED +( + MV_CREATION_METADATA_ID NUMBER NOT NULL, + TBL_ID NUMBER NOT NULL +); + +-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent] +CREATE TABLE PARTITION_EVENTS +( + PART_NAME_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NULL, + DB_NAME VARCHAR2(128) NULL, + EVENT_TIME NUMBER NOT NULL, + EVENT_TYPE NUMBER (10) NOT NULL, + PARTITION_NAME VARCHAR2(767) NULL, + TBL_NAME VARCHAR2(256) NULL +); + +ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID); + +-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList] +CREATE TABLE SKEWED_STRING_LIST +( + STRING_LIST_ID NUMBER NOT NULL +); + +ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID); + +CREATE TABLE SKEWED_STRING_LIST_VALUES +( + STRING_LIST_ID NUMBER NOT NULL, + "STRING_LIST_VALUE" VARCHAR2(256) NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX); + +ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ; + +CREATE TABLE SKEWED_COL_NAMES +( + SD_ID NUMBER NOT NULL, + "SKEWED_COL_NAME" VARCHAR2(256) NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE TABLE SKEWED_COL_VALUE_LOC_MAP +( + SD_ID NUMBER NOT NULL, + STRING_LIST_ID_KID NUMBER NOT NULL, + "LOCATION" VARCHAR2(4000) NULL +); + +CREATE TABLE MASTER_KEYS +( + KEY_ID NUMBER (10) NOT NULL, + MASTER_KEY VARCHAR2(767) NULL +); + +CREATE TABLE DELEGATION_TOKENS +( + TOKEN_IDENT VARCHAR2(767) NOT NULL, + TOKEN VARCHAR2(767) NULL +); + +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID); + +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ; + +ALTER TABLE 
SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE TABLE SKEWED_VALUES +( + SD_ID_OID NUMBER NOT NULL, + STRING_LIST_ID_EID NUMBER NOT NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX); + +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ; + +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +ALTER TABLE DBS ADD CONSTRAINT CTLGS_FK FOREIGN KEY (CTLG_NAME) REFERENCES CTLGS ("NAME") INITIALLY DEFERRED; + +-- column statistics + +CREATE TABLE TAB_COL_STATS ( + CS_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NOT NULL, + DB_NAME VARCHAR2(128) NOT NULL, + TABLE_NAME VARCHAR2(256) NOT NULL, + COLUMN_NAME VARCHAR2(767) NOT NULL, + COLUMN_TYPE VARCHAR2(128) NOT NULL, + TBL_ID NUMBER NOT NULL, + LONG_LOW_VALUE NUMBER, + LONG_HIGH_VALUE NUMBER, + DOUBLE_LOW_VALUE NUMBER, + DOUBLE_HIGH_VALUE NUMBER, + BIG_DECIMAL_LOW_VALUE VARCHAR2(4000), + BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000), + NUM_NULLS NUMBER NOT NULL, + NUM_DISTINCTS NUMBER, + BIT_VECTOR BLOB, + AVG_COL_LEN NUMBER, + MAX_COL_LEN NUMBER, + NUM_TRUES NUMBER, + NUM_FALSES NUMBER, + LAST_ANALYZED NUMBER NOT NULL +); + +CREATE TABLE VERSION ( + VER_ID NUMBER NOT NULL, + SCHEMA_VERSION VARCHAR(127) NOT NULL, + VERSION_COMMENT VARCHAR(255) +); +ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID); + +ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID); + +ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID); + +CREATE TABLE PART_COL_STATS ( + CS_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NOT NULL, + DB_NAME VARCHAR2(128) NOT NULL, + TABLE_NAME VARCHAR2(256) NOT NULL, + PARTITION_NAME VARCHAR2(767) NOT NULL, + COLUMN_NAME VARCHAR2(767) NOT NULL, + COLUMN_TYPE VARCHAR2(128) NOT NULL, + PART_ID NUMBER NOT NULL, + LONG_LOW_VALUE NUMBER, + LONG_HIGH_VALUE NUMBER, + DOUBLE_LOW_VALUE NUMBER, + DOUBLE_HIGH_VALUE NUMBER, + BIG_DECIMAL_LOW_VALUE VARCHAR2(4000), + BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000), + NUM_NULLS NUMBER NOT NULL, + NUM_DISTINCTS NUMBER, + BIT_VECTOR BLOB, + AVG_COL_LEN NUMBER, + MAX_COL_LEN NUMBER, + NUM_TRUES NUMBER, + NUM_FALSES NUMBER, + LAST_ANALYZED NUMBER NOT NULL +); + +ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID); + +ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED; + +CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID); + +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); + +CREATE TABLE FUNCS ( + FUNC_ID NUMBER NOT NULL, + CLASS_NAME VARCHAR2(4000), + CREATE_TIME NUMBER(10) NOT NULL, + DB_ID NUMBER, + FUNC_NAME VARCHAR2(128), + FUNC_TYPE NUMBER(10) NOT NULL, + OWNER_NAME VARCHAR2(128), + OWNER_TYPE VARCHAR2(10) +); + +ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID); + +CREATE TABLE FUNC_RU ( + FUNC_ID NUMBER NOT NULL, + RESOURCE_TYPE NUMBER(10) NOT NULL, + RESOURCE_URI VARCHAR2(4000), + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX); 
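+ +-- The two notification tables back the metastore event stream: NOTIFICATION_LOG keeps +-- one row per event, while NOTIFICATION_SEQUENCE holds a single row whose NEXT_EVENT_ID +-- supplies the next event id; the guarded INSERT ... FROM DUAL below seeds that row exactly once.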
+ +CREATE TABLE NOTIFICATION_LOG +( + NL_ID NUMBER NOT NULL, + EVENT_ID NUMBER NOT NULL, + EVENT_TIME NUMBER(10) NOT NULL, + EVENT_TYPE VARCHAR2(32) NOT NULL, + CAT_NAME VARCHAR2(256), + DB_NAME VARCHAR2(128), + TBL_NAME VARCHAR2(256), + MESSAGE CLOB NULL, + MESSAGE_FORMAT VARCHAR(16) NULL +); + +ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID); + +CREATE TABLE NOTIFICATION_SEQUENCE +( + NNI_ID NUMBER NOT NULL, + NEXT_EVENT_ID NUMBER NOT NULL +); + +ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID); + +INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 FROM DUAL WHERE NOT EXISTS ( SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE); + +-- Tables to manage resource plans. + +CREATE TABLE WM_RESOURCEPLAN +( + RP_ID NUMBER NOT NULL, + "NAME" VARCHAR2(128) NOT NULL, + QUERY_PARALLELISM NUMBER(10), + STATUS VARCHAR2(20) NOT NULL, + DEFAULT_POOL_ID NUMBER +); + +ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID); + +CREATE TABLE WM_POOL +( + POOL_ID NUMBER NOT NULL, + RP_ID NUMBER NOT NULL, + PATH VARCHAR2(1024) NOT NULL, + ALLOC_FRACTION NUMBER, + QUERY_PARALLELISM NUMBER(10), + SCHEDULING_POLICY VARCHAR2(1024) +); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID); + +CREATE TABLE WM_TRIGGER +( + TRIGGER_ID NUMBER NOT NULL, + RP_ID NUMBER NOT NULL, + "NAME" VARCHAR2(128) NOT NULL, + TRIGGER_EXPRESSION VARCHAR2(1024), + ACTION_EXPRESSION VARCHAR2(1024), + IS_IN_UNMANAGED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_IN_UNMANAGED IN (1,0)) +); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID); + +CREATE TABLE WM_POOL_TO_TRIGGER +( + POOL_ID NUMBER NOT NULL, + TRIGGER_ID NUMBER NOT NULL +); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID); + +CREATE TABLE WM_MAPPING +( + MAPPING_ID NUMBER NOT NULL, + RP_ID NUMBER NOT NULL, + ENTITY_TYPE VARCHAR2(128) NOT NULL, + ENTITY_NAME VARCHAR2(128) NOT NULL, + POOL_ID NUMBER NOT NULL, + ORDERING NUMBER(10) +); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID); + +-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege] +ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ; + +CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID); + +CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table COLUMNS_V2 +ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ; + +CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID); + + +-- Constraints for table PARTITION_KEY_VALS +ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ; + +CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID); + + +-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase] +CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME", CTLG_NAME); + + +-- Constraints for table PARTITION_PARAMS +ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ; + +CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS 
(PART_ID); + + +-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo] + +-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType] +CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME); + + +-- Constraints for table PARTITION_KEYS +ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID); + + +-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole] +CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME); + + +-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition] +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID); + +CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID); + +CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID); + + +-- Constraints for table INDEX_PARAMS +ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ; + +CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID); + + +-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege] +ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID); + + +-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex] +ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID); + +CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID); + +CREATE INDEX IDXS_N51 ON IDXS (SD_ID); + +CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID); + + +-- Constraints for table BUCKETING_COLS +ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID); + + +-- Constraints for table TYPE_FIELDS +ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ; + +CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME); + + +-- Constraints for table SD_PARAMS +ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID); + + +-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege] +CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor] +ALTER TABLE SDS 
ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ; +ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ; + +CREATE INDEX SDS_N49 ON SDS (SERDE_ID); +CREATE INDEX SDS_N50 ON SDS (CD_ID); + + +-- Constraints for table TABLE_PARAMS +ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID); + + +-- Constraints for table SORT_COLS +ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID); + + +-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege] +ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID); + +CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table DATABASE_PARAMS +ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ; + +CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID); + + +-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap] +ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ; + +CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID); + +CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table SERDE_PARAMS +ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ; + +CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID); + + +-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege] +ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ; + +CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID); + + +-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege] +ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ; + +CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID); + + +-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable] +ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ; + +ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE INDEX TBLS_N49 ON TBLS (DB_ID); + +CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID); + +CREATE INDEX TBLS_N50 ON TBLS (SD_ID); + + +-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent] +CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME); + + +-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions] +ALTER TABLE FUNCS ADD 
CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED; + +CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID); + +CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID); + + +-- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions] +ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED; + +CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID); + +CREATE TABLE KEY_CONSTRAINTS +( + CHILD_CD_ID NUMBER, + CHILD_INTEGER_IDX NUMBER, + CHILD_TBL_ID NUMBER, + PARENT_CD_ID NUMBER, + PARENT_INTEGER_IDX NUMBER NOT NULL, + PARENT_TBL_ID NUMBER NOT NULL, + POSITION NUMBER NOT NULL, + CONSTRAINT_NAME VARCHAR(400) NOT NULL, + CONSTRAINT_TYPE NUMBER NOT NULL, + UPDATE_RULE NUMBER, + DELETE_RULE NUMBER, + ENABLE_VALIDATE_RELY NUMBER NOT NULL, + DEFAULT_VALUE VARCHAR(400) +) ; + +ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION); + +CREATE INDEX CONSTRAINTS_PT_INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID); + +CREATE INDEX CONSTRAINTS_CT_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE); + +-- Table for METASTORE_DB_PROPERTIES and its constraints +CREATE TABLE METASTORE_DB_PROPERTIES +( + PROPERTY_KEY VARCHAR(255) NOT NULL, + PROPERTY_VALUE VARCHAR(1000) NOT NULL, + DESCRIPTION VARCHAR(1000) +); + +ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY); + +-- Constraints for resource plan tables. + +CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME"); + +CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH); + +ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME"); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK1 FOREIGN KEY (MV_CREATION_METADATA_ID) REFERENCES MV_CREATION_METADATA (MV_CREATION_METADATA_ID); + +ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK2 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID); + +------------------------------ +-- Transaction and lock tables +------------------------------ +CREATE TABLE TXNS ( + TXN_ID NUMBER(19) PRIMARY KEY, + TXN_STATE char(1) NOT NULL, + TXN_STARTED NUMBER(19) NOT NULL, + TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL, + TXN_USER varchar(128) NOT NULL, + TXN_HOST varchar(128) NOT NULL, + TXN_AGENT_INFO varchar2(128), + TXN_META_INFO varchar2(128), + TXN_HEARTBEAT_COUNT number(10), + TXN_TYPE number(10) +) ROWDEPENDENCIES; + +CREATE TABLE TXN_COMPONENTS ( + TC_TXNID NUMBER(19) NOT NULL REFERENCES TXNS (TXN_ID), + TC_DATABASE VARCHAR2(128) NOT NULL, + TC_TABLE VARCHAR2(128), + TC_PARTITION VARCHAR2(767) NULL, + 
TC_OPERATION_TYPE char(1) NOT NULL, + TC_WRITEID NUMBER(19) +) ROWDEPENDENCIES; + +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); + +CREATE TABLE COMPLETED_TXN_COMPONENTS ( + CTC_TXNID NUMBER(19) NOT NULL, + CTC_DATABASE VARCHAR2(128) NOT NULL, + CTC_TABLE VARCHAR2(256), + CTC_PARTITION VARCHAR2(767), + CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL, + CTC_WRITEID NUMBER(19) +) ROWDEPENDENCIES; + +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +CREATE TABLE NEXT_TXN_ID ( + NTXN_NEXT NUMBER(19) NOT NULL +); +INSERT INTO NEXT_TXN_ID VALUES(1); + +CREATE TABLE HIVE_LOCKS ( + HL_LOCK_EXT_ID NUMBER(19) NOT NULL, + HL_LOCK_INT_ID NUMBER(19) NOT NULL, + HL_TXNID NUMBER(19) NOT NULL, + HL_DB VARCHAR2(128) NOT NULL, + HL_TABLE VARCHAR2(128), + HL_PARTITION VARCHAR2(767), + HL_LOCK_STATE CHAR(1) NOT NULL, + HL_LOCK_TYPE CHAR(1) NOT NULL, + HL_LAST_HEARTBEAT NUMBER(19) NOT NULL, + HL_ACQUIRED_AT NUMBER(19), + HL_USER varchar(128) NOT NULL, + HL_HOST varchar(128) NOT NULL, + HL_HEARTBEAT_COUNT number(10), + HL_AGENT_INFO varchar2(128), + HL_BLOCKEDBY_EXT_ID number(19), + HL_BLOCKEDBY_INT_ID number(19), + PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID) +) ROWDEPENDENCIES; + +CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID); + +CREATE TABLE NEXT_LOCK_ID ( + NL_NEXT NUMBER(19) NOT NULL +); +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE COMPACTION_QUEUE ( + CQ_ID NUMBER(19) PRIMARY KEY, + CQ_DATABASE varchar(128) NOT NULL, + CQ_TABLE varchar(128) NOT NULL, + CQ_PARTITION varchar(767), + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_TBLPROPERTIES varchar(2048), + CQ_WORKER_ID varchar(128), + CQ_START NUMBER(19), + CQ_RUN_AS varchar(128), + CQ_HIGHEST_WRITE_ID NUMBER(19), + CQ_META_INFO BLOB, + CQ_HADOOP_JOB_ID varchar2(32) +) ROWDEPENDENCIES; + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( + NCQ_NEXT NUMBER(19) NOT NULL +); +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID NUMBER(19) PRIMARY KEY, + CC_DATABASE varchar(128) NOT NULL, + CC_TABLE varchar(128) NOT NULL, + CC_PARTITION varchar(767), + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_TBLPROPERTIES varchar(2048), + CC_WORKER_ID varchar(128), + CC_START NUMBER(19), + CC_END NUMBER(19), + CC_RUN_AS varchar(128), + CC_HIGHEST_WRITE_ID NUMBER(19), + CC_META_INFO BLOB, + CC_HADOOP_JOB_ID varchar2(32) +) ROWDEPENDENCIES; + +CREATE TABLE AUX_TABLE ( + MT_KEY1 varchar2(128) NOT NULL, + MT_KEY2 number(19) NOT NULL, + MT_COMMENT varchar2(255), + PRIMARY KEY(MT_KEY1, MT_KEY2) +); + +CREATE TABLE WRITE_SET ( + WS_DATABASE varchar2(128) NOT NULL, + WS_TABLE varchar2(128) NOT NULL, + WS_PARTITION varchar2(767), + WS_TXNID number(19) NOT NULL, + WS_COMMIT_ID number(19) NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +); + +CREATE TABLE TXN_TO_WRITE_ID ( + T2W_TXNID NUMBER(19) NOT NULL, + T2W_DATABASE VARCHAR2(128) NOT NULL, + T2W_TABLE VARCHAR2(256) NOT NULL, + T2W_WRITEID NUMBER(19) NOT NULL +); + +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); + +CREATE TABLE NEXT_WRITE_ID ( + NWI_DATABASE VARCHAR2(128) NOT NULL, + NWI_TABLE VARCHAR2(256) NOT NULL, + NWI_NEXT NUMBER(19) NOT NULL +); + +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); + +CREATE TABLE MIN_HISTORY_LEVEL ( + MHL_TXNID NUMBER(19) NOT NULL, + 
MHL_MIN_OPEN_TXNID NUMBER(19) NOT NULL, + PRIMARY KEY(MHL_TXNID) +); + +CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID); + +CREATE TABLE "I_SCHEMA" ( + "SCHEMA_ID" number primary key, + "SCHEMA_TYPE" number not null, + "NAME" varchar2(256) unique, + "DB_ID" number references "DBS" ("DB_ID"), + "COMPATIBILITY" number not null, + "VALIDATION_LEVEL" number not null, + "CAN_EVOLVE" number(1) not null, + "SCHEMA_GROUP" varchar2(256), + "DESCRIPTION" varchar2(4000) +); + +CREATE TABLE "SCHEMA_VERSION" ( + "SCHEMA_VERSION_ID" number primary key, + "SCHEMA_ID" number references "I_SCHEMA" ("SCHEMA_ID"), + "VERSION" number not null, + "CREATED_AT" number not null, + "CD_ID" number references "CDS" ("CD_ID"), + "STATE" number not null, + "DESCRIPTION" varchar2(4000), + "SCHEMA_TEXT" clob, + "FINGERPRINT" varchar2(256), + "SCHEMA_VERSION_NAME" varchar2(256), + "SERDE_ID" number references "SERDES" ("SERDE_ID"), + UNIQUE ("SCHEMA_ID", "VERSION") +); + +CREATE TABLE REPL_TXN_MAP ( + RTM_REPL_POLICY varchar(256) NOT NULL, + RTM_SRC_TXN_ID number(19) NOT NULL, + RTM_TARGET_TXN_ID number(19) NOT NULL, + PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID) +); + +CREATE TABLE RUNTIME_STATS ( + RS_ID NUMBER primary key, + CREATE_TIME NUMBER(10) NOT NULL, + WEIGHT NUMBER(10) NOT NULL, + PAYLOAD BLOB +); + +CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME); + + +-- ----------------------------------------------------------------- +-- Record schema version. Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0'); diff --git standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql new file mode 100644 index 0000000000..5c12b8c2e9 --- /dev/null +++ standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql @@ -0,0 +1,1829 @@ +-- +-- PostgreSQL database dump +-- + +SET statement_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = off; +SET check_function_bodies = false; +SET client_min_messages = warning; +SET escape_string_warning = off; + +SET search_path = public, pg_catalog; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "BUCKETING_COLS" ( + "SD_ID" bigint NOT NULL, + "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying, + "INTEGER_IDX" bigint NOT NULL +); + + +-- +-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "CDS" ( + "CD_ID" bigint NOT NULL +); + + +-- +-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "COLUMNS_V2" ( + "CD_ID" bigint NOT NULL, + "COMMENT" character varying(4000), + "COLUMN_NAME" character varying(767) NOT NULL, + "TYPE_NAME" text, + "INTEGER_IDX" integer NOT NULL +); + + +-- +-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "DATABASE_PARAMS" ( + "DB_ID" bigint NOT NULL, + "PARAM_KEY" character varying(180) NOT NULL, + "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying +); + + +CREATE TABLE "CTLGS" ( + "CTLG_ID" BIGINT PRIMARY KEY, + "NAME" VARCHAR(256) UNIQUE, + "DESC" VARCHAR(4000), + "LOCATION_URI" VARCHAR(4000) NOT NULL +); + +-- +-- 
Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "DBS" ( + "DB_ID" bigint NOT NULL, + "DESC" character varying(4000) DEFAULT NULL::character varying, + "DB_LOCATION_URI" character varying(4000) NOT NULL, + "NAME" character varying(128) DEFAULT NULL::character varying, + "OWNER_NAME" character varying(128) DEFAULT NULL::character varying, + "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying, + "CTLG_NAME" varchar(256) +); + + +-- +-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "DB_PRIVS" ( + "DB_GRANT_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "DB_ID" bigint, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "DB_PRIV" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "GLOBAL_PRIVS" ( + "USER_GRANT_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "USER_PRIV" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "IDXS" ( + "INDEX_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "DEFERRED_REBUILD" boolean NOT NULL, + "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying, + "INDEX_NAME" character varying(128) DEFAULT NULL::character varying, + "INDEX_TBL_ID" bigint, + "LAST_ACCESS_TIME" bigint NOT NULL, + "ORIG_TBL_ID" bigint, + "SD_ID" bigint +); + + +-- +-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "INDEX_PARAMS" ( + "INDEX_ID" bigint NOT NULL, + "PARAM_KEY" character varying(256) NOT NULL, + "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying +); + + +-- +-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "NUCLEUS_TABLES" ( + "CLASS_NAME" character varying(128) NOT NULL, + "TABLE_NAME" character varying(128) NOT NULL, + "TYPE" character varying(4) NOT NULL, + "OWNER" character varying(2) NOT NULL, + "VERSION" character varying(20) NOT NULL, + "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying +); + + +-- +-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PARTITIONS" ( + "PART_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "LAST_ACCESS_TIME" bigint NOT NULL, + "PART_NAME" character varying(767) DEFAULT NULL::character varying, + "SD_ID" bigint, + "TBL_ID" bigint +); + + +-- +-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PARTITION_EVENTS" ( + "PART_NAME_ID" bigint NOT NULL, + "CAT_NAME" character varying(256), + "DB_NAME" character varying(128), + "EVENT_TIME" bigint NOT NULL, + "EVENT_TYPE" integer NOT NULL, + "PARTITION_NAME" character varying(767), + 
"TBL_NAME" character varying(256) +); + + +-- +-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PARTITION_KEYS" ( + "TBL_ID" bigint NOT NULL, + "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying, + "PKEY_NAME" character varying(128) NOT NULL, + "PKEY_TYPE" character varying(767) NOT NULL, + "INTEGER_IDX" bigint NOT NULL +); + + +-- +-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PARTITION_KEY_VALS" ( + "PART_ID" bigint NOT NULL, + "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying, + "INTEGER_IDX" bigint NOT NULL +); + + +-- +-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PARTITION_PARAMS" ( + "PART_ID" bigint NOT NULL, + "PARAM_KEY" character varying(256) NOT NULL, + "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying +); + + +-- +-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PART_COL_PRIVS" ( + "PART_COLUMN_GRANT_ID" bigint NOT NULL, + "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying, + "CREATE_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_ID" bigint, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PART_PRIVS" ( + "PART_GRANT_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_ID" bigint, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_PRIV" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "ROLES" ( + "ROLE_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "OWNER_NAME" character varying(128) DEFAULT NULL::character varying, + "ROLE_NAME" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "ROLE_MAP" ( + "ROLE_GRANT_ID" bigint NOT NULL, + "ADD_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "ROLE_ID" bigint +); + + +-- +-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SDS" ( + "SD_ID" bigint NOT NULL, + "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying, + "IS_COMPRESSED" boolean NOT NULL, + "LOCATION" character varying(4000) DEFAULT NULL::character varying, + "NUM_BUCKETS" bigint NOT NULL, + "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying, 
+ "SERDE_ID" bigint, + "CD_ID" bigint +); + + +-- +-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SD_PARAMS" ( + "SD_ID" bigint NOT NULL, + "PARAM_KEY" character varying(256) NOT NULL, + "PARAM_VALUE" text DEFAULT NULL +); + + +-- +-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SEQUENCE_TABLE" ( + "SEQUENCE_NAME" character varying(255) NOT NULL, + "NEXT_VAL" bigint NOT NULL +); + +INSERT INTO "SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1); + +-- +-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SERDES" ( + "SERDE_ID" bigint NOT NULL, + "NAME" character varying(128) DEFAULT NULL::character varying, + "SLIB" character varying(4000) DEFAULT NULL::character varying, + "DESCRIPTION" varchar(4000), + "SERIALIZER_CLASS" varchar(4000), + "DESERIALIZER_CLASS" varchar(4000), + "SERDE_TYPE" integer +); + + +-- +-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SERDE_PARAMS" ( + "SERDE_ID" bigint NOT NULL, + "PARAM_KEY" character varying(256) NOT NULL, + "PARAM_VALUE" text DEFAULT NULL +); + + +-- +-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SORT_COLS" ( + "SD_ID" bigint NOT NULL, + "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying, + "ORDER" bigint NOT NULL, + "INTEGER_IDX" bigint NOT NULL +); + + +-- +-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TABLE_PARAMS" ( + "TBL_ID" bigint NOT NULL, + "PARAM_KEY" character varying(256) NOT NULL, + "PARAM_VALUE" text DEFAULT NULL +); + + +-- +-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TBLS" ( + "TBL_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "DB_ID" bigint, + "LAST_ACCESS_TIME" bigint NOT NULL, + "OWNER" character varying(767) DEFAULT NULL::character varying, + "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying, + "RETENTION" bigint NOT NULL, + "SD_ID" bigint, + "TBL_NAME" character varying(256) DEFAULT NULL::character varying, + "TBL_TYPE" character varying(128) DEFAULT NULL::character varying, + "VIEW_EXPANDED_TEXT" text, + "VIEW_ORIGINAL_TEXT" text, + "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false +); + +-- +-- Name: MV_CREATION_METADATA; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "MV_CREATION_METADATA" ( + "MV_CREATION_METADATA_ID" bigint NOT NULL, + "CAT_NAME" character varying(256) NOT NULL, + "DB_NAME" character varying(128) NOT NULL, + "TBL_NAME" character varying(256) NOT NULL, + "TXN_LIST" text +); + +-- +-- Name: MV_TABLES_USED; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "MV_TABLES_USED" ( + "MV_CREATION_METADATA_ID" bigint NOT NULL, + "TBL_ID" bigint NOT NULL +); + +-- +-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TBL_COL_PRIVS" ( + "TBL_COLUMN_GRANT_ID" bigint NOT NULL, + "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying, + "CREATE_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" 
character varying(128) DEFAULT NULL::character varying, + "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying, + "TBL_ID" bigint +); + + +-- +-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TBL_PRIVS" ( + "TBL_GRANT_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "TBL_PRIV" character varying(128) DEFAULT NULL::character varying, + "TBL_ID" bigint +); + + +-- +-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TYPES" ( + "TYPES_ID" bigint NOT NULL, + "TYPE_NAME" character varying(128) DEFAULT NULL::character varying, + "TYPE1" character varying(767) DEFAULT NULL::character varying, + "TYPE2" character varying(767) DEFAULT NULL::character varying +); + + +-- +-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TYPE_FIELDS" ( + "TYPE_NAME" bigint NOT NULL, + "COMMENT" character varying(256) DEFAULT NULL::character varying, + "FIELD_NAME" character varying(128) NOT NULL, + "FIELD_TYPE" character varying(767) NOT NULL, + "INTEGER_IDX" bigint NOT NULL +); + +CREATE TABLE "SKEWED_STRING_LIST" ( + "STRING_LIST_ID" bigint NOT NULL +); + +CREATE TABLE "SKEWED_STRING_LIST_VALUES" ( + "STRING_LIST_ID" bigint NOT NULL, + "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying, + "INTEGER_IDX" bigint NOT NULL +); + +CREATE TABLE "SKEWED_COL_NAMES" ( + "SD_ID" bigint NOT NULL, + "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying, + "INTEGER_IDX" bigint NOT NULL +); + +CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" ( + "SD_ID" bigint NOT NULL, + "STRING_LIST_ID_KID" bigint NOT NULL, + "LOCATION" character varying(4000) DEFAULT NULL::character varying +); + +CREATE TABLE "SKEWED_VALUES" ( + "SD_ID_OID" bigint NOT NULL, + "STRING_LIST_ID_EID" bigint NOT NULL, + "INTEGER_IDX" bigint NOT NULL +); + + +-- +-- Name: TAB_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "MASTER_KEYS" +( + "KEY_ID" SERIAL, + "MASTER_KEY" varchar(767) NULL, + PRIMARY KEY ("KEY_ID") +); + +CREATE TABLE "DELEGATION_TOKENS" +( + "TOKEN_IDENT" varchar(767) NOT NULL, + "TOKEN" varchar(767) NULL, + PRIMARY KEY ("TOKEN_IDENT") +); + +CREATE TABLE "TAB_COL_STATS" ( + "CS_ID" bigint NOT NULL, + "CAT_NAME" character varying(256) DEFAULT NULL::character varying, + "DB_NAME" character varying(128) DEFAULT NULL::character varying, + "TABLE_NAME" character varying(256) DEFAULT NULL::character varying, + "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying, + "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying, + "TBL_ID" bigint NOT NULL, + "LONG_LOW_VALUE" bigint, + "LONG_HIGH_VALUE" bigint, + "DOUBLE_LOW_VALUE" double precision, + "DOUBLE_HIGH_VALUE" double precision, + "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying, + "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying, + "NUM_NULLS" bigint NOT NULL, + "NUM_DISTINCTS" bigint, + "BIT_VECTOR" bytea, + "AVG_COL_LEN" double precision, + "MAX_COL_LEN" bigint, + "NUM_TRUES" bigint, + "NUM_FALSES" bigint, + "LAST_ANALYZED" bigint NOT NULL +); + +-- 
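TAB_COL_STATS above keys table-level column statistics to a concrete table row through "TBL_ID" rather than by name, so a consumer joins back to "TBLS". A minimal illustrative query against this schema (the column name and the presence of LONG_* bounds are assumptions for the example, not part of the dump):

-- Fetch the recorded value range and null count for a column named 'id'
-- across all tables that have statistics; "TBL_ID" joins into "TBLS".
SELECT t."TBL_NAME",
       s."COLUMN_NAME",
       s."LONG_LOW_VALUE",
       s."LONG_HIGH_VALUE",
       s."NUM_NULLS"
  FROM "TAB_COL_STATS" s
  JOIN "TBLS" t ON t."TBL_ID" = s."TBL_ID"
 WHERE s."COLUMN_NAME" = 'id';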
+-- Table structure for VERSION +-- +CREATE TABLE "VERSION" ( + "VER_ID" bigint, + "SCHEMA_VERSION" character varying(127) NOT NULL, + "VERSION_COMMENT" character varying(255) NOT NULL +); + +-- +-- Name: PART_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PART_COL_STATS" ( + "CS_ID" bigint NOT NULL, + "CAT_NAME" character varying(256) DEFAULT NULL::character varying, + "DB_NAME" character varying(128) DEFAULT NULL::character varying, + "TABLE_NAME" character varying(256) DEFAULT NULL::character varying, + "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying, + "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying, + "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_ID" bigint NOT NULL, + "LONG_LOW_VALUE" bigint, + "LONG_HIGH_VALUE" bigint, + "DOUBLE_LOW_VALUE" double precision, + "DOUBLE_HIGH_VALUE" double precision, + "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying, + "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying, + "NUM_NULLS" bigint NOT NULL, + "NUM_DISTINCTS" bigint, + "BIT_VECTOR" bytea, + "AVG_COL_LEN" double precision, + "MAX_COL_LEN" bigint, + "NUM_TRUES" bigint, + "NUM_FALSES" bigint, + "LAST_ANALYZED" bigint NOT NULL +); + +-- +-- Table structure for FUNCS +-- +CREATE TABLE "FUNCS" ( + "FUNC_ID" BIGINT NOT NULL, + "CLASS_NAME" VARCHAR(4000), + "CREATE_TIME" INTEGER NOT NULL, + "DB_ID" BIGINT, + "FUNC_NAME" VARCHAR(128), + "FUNC_TYPE" INTEGER NOT NULL, + "OWNER_NAME" VARCHAR(128), + "OWNER_TYPE" VARCHAR(10), + PRIMARY KEY ("FUNC_ID") +); + +-- +-- Table structure for FUNC_RU +-- +CREATE TABLE "FUNC_RU" ( + "FUNC_ID" BIGINT NOT NULL, + "RESOURCE_TYPE" INTEGER NOT NULL, + "RESOURCE_URI" VARCHAR(4000), + "INTEGER_IDX" INTEGER NOT NULL, + PRIMARY KEY ("FUNC_ID", "INTEGER_IDX") +); + +CREATE TABLE "NOTIFICATION_LOG" +( + "NL_ID" BIGINT NOT NULL, + "EVENT_ID" BIGINT NOT NULL, + "EVENT_TIME" INTEGER NOT NULL, + "EVENT_TYPE" VARCHAR(32) NOT NULL, + "CAT_NAME" VARCHAR(256), + "DB_NAME" VARCHAR(128), + "TBL_NAME" VARCHAR(256), + "MESSAGE" text, + "MESSAGE_FORMAT" VARCHAR(16), + PRIMARY KEY ("NL_ID") +); + +CREATE TABLE "NOTIFICATION_SEQUENCE" +( + "NNI_ID" BIGINT NOT NULL, + "NEXT_EVENT_ID" BIGINT NOT NULL, + PRIMARY KEY ("NNI_ID") +); + +INSERT INTO "NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT 1,1 WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "NOTIFICATION_SEQUENCE"); + +CREATE TABLE "KEY_CONSTRAINTS" +( + "CHILD_CD_ID" BIGINT, + "CHILD_INTEGER_IDX" BIGINT, + "CHILD_TBL_ID" BIGINT, + "PARENT_CD_ID" BIGINT, + "PARENT_INTEGER_IDX" BIGINT NOT NULL, + "PARENT_TBL_ID" BIGINT NOT NULL, + "POSITION" BIGINT NOT NULL, + "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, + "CONSTRAINT_TYPE" SMALLINT NOT NULL, + "UPDATE_RULE" SMALLINT, + "DELETE_RULE" SMALLINT, + "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL, + "DEFAULT_VALUE" VARCHAR(400), + PRIMARY KEY ("CONSTRAINT_NAME", "POSITION") +) ; + +--- +--- Table structure for METASTORE_DB_PROPERTIES +--- +CREATE TABLE "METASTORE_DB_PROPERTIES" +( + "PROPERTY_KEY" VARCHAR(255) NOT NULL, + "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, + "DESCRIPTION" VARCHAR(1000) +); + + +CREATE TABLE "WM_RESOURCEPLAN" ( + "RP_ID" bigint NOT NULL, + "NAME" character varying(128) NOT NULL, + "QUERY_PARALLELISM" integer, + "STATUS" character varying(20) NOT NULL, + "DEFAULT_POOL_ID" bigint +); + +CREATE TABLE "WM_POOL" ( + "POOL_ID" bigint NOT NULL, + "RP_ID" bigint NOT NULL, + "PATH" character varying(1024) 
NOT NULL, + "ALLOC_FRACTION" double precision, + "QUERY_PARALLELISM" integer, + "SCHEDULING_POLICY" character varying(1024) +); + +CREATE TABLE "WM_TRIGGER" ( + "TRIGGER_ID" bigint NOT NULL, + "RP_ID" bigint NOT NULL, + "NAME" character varying(128) NOT NULL, + "TRIGGER_EXPRESSION" character varying(1024) DEFAULT NULL::character varying, + "ACTION_EXPRESSION" character varying(1024) DEFAULT NULL::character varying, + "IS_IN_UNMANAGED" smallint NOT NULL DEFAULT 0 +); + +CREATE TABLE "WM_POOL_TO_TRIGGER" ( + "POOL_ID" bigint NOT NULL, + "TRIGGER_ID" bigint NOT NULL +); + +CREATE TABLE "WM_MAPPING" ( + "MAPPING_ID" bigint NOT NULL, + "RP_ID" bigint NOT NULL, + "ENTITY_TYPE" character varying(128) NOT NULL, + "ENTITY_NAME" character varying(128) NOT NULL, + "POOL_ID" bigint, + "ORDERING" integer +); + +-- +-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "BUCKETING_COLS" + ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + + +-- +-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "CDS" + ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID"); + + +-- +-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "COLUMNS_V2" + ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME"); + + +-- +-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DATABASE_PARAMS" + ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY"); + + +-- +-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DB_PRIVS" + ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DBS" + ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID"); + + +-- +-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DB_PRIVS" + ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID"); + + +-- +-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "GLOBAL_PRIVS" + ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "GLOBAL_PRIVS" + ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID"); + + +-- +-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "IDXS" + ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID"); + + +-- +-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "INDEX_PARAMS" + ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY"); + + +-- +-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "NUCLEUS_TABLES" + ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME"); + + +-- +-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITIONS" + ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID"); + + +-- +-- Name: 
PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITION_EVENTS" + ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID"); + + +-- +-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITION_KEYS" + ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME"); + + +-- +-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITION_KEY_VALS" + ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX"); + + +-- +-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITION_PARAMS" + ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY"); + + +-- +-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PART_COL_PRIVS" + ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID"); + + +-- +-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PART_PRIVS" + ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID"); + + +-- +-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "ROLES" + ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME"); + + +-- +-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "ROLES" + ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID"); + + +-- +-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "ROLE_MAP" + ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID"); + + +-- +-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SDS" + ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID"); + + +-- +-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SD_PARAMS" + ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY"); + + +-- +-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SEQUENCE_TABLE" + ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME"); + + +-- +-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SERDES" + ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID"); + + +-- +-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SERDE_PARAMS" + ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY"); + + +-- +-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SORT_COLS" + ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + + +-- +-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TABLE_PARAMS" + ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY"); + + +-- +-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TBLS" + ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID"); + + +-- +-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + 
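Each statement in this run follows the same ALTER TABLE ONLY ... ADD CONSTRAINT shape. As a hypothetical illustration of the duplicate protection these provide (not part of the dump), take "SEQUENCE_TABLE", whose primary key was added just above:

-- The second insert violates "SEQUENCE_TABLE_pkey" (PRIMARY KEY on
-- "SEQUENCE_NAME") and PostgreSQL rejects it with a duplicate-key error.
INSERT INTO "SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") VALUES ('org.example.MySequence', 1);
INSERT INTO "SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") VALUES ('org.example.MySequence', 5); -- fails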
+ALTER TABLE ONLY "TBL_COL_PRIVS" + ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID"); + + +-- +-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TBL_PRIVS" + ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID"); + + +-- +-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TYPES" + ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID"); + + +-- +-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TYPE_FIELDS" + ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME"); + +ALTER TABLE ONLY "SKEWED_STRING_LIST" + ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID"); + +ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES" + ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX"); + + +ALTER TABLE ONLY "SKEWED_COL_NAMES" + ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + +ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP" + ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID"); + +ALTER TABLE ONLY "SKEWED_VALUES" + ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX"); + +-- +-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- +ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID"); + +-- +-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- +ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID"); + +-- +-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "IDXS" + ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID"); + + +-- +-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITIONS" + ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID"); + + +-- +-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TBLS" + ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID"); + + +-- +-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DBS" + ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME"); + + +-- +-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TYPES" + ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME"); + + +-- +-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "ROLE_MAP" + ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE"); + +ALTER TABLE ONLY "METASTORE_DB_PROPERTIES" + ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY"); + + +-- Resource plan: Primary key and unique key constraints. 
+ALTER TABLE ONLY "WM_RESOURCEPLAN" + ADD CONSTRAINT "WM_RESOURCEPLAN_pkey" PRIMARY KEY ("RP_ID"); + +ALTER TABLE ONLY "WM_RESOURCEPLAN" + ADD CONSTRAINT "UNIQUE_WM_RESOURCEPLAN" UNIQUE ("NAME"); + +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "WM_POOL_pkey" PRIMARY KEY ("POOL_ID"); + +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "UNIQUE_WM_POOL" UNIQUE ("RP_ID", "PATH"); + +ALTER TABLE ONLY "WM_TRIGGER" + ADD CONSTRAINT "WM_TRIGGER_pkey" PRIMARY KEY ("TRIGGER_ID"); + +ALTER TABLE ONLY "WM_TRIGGER" + ADD CONSTRAINT "UNIQUE_WM_TRIGGER" UNIQUE ("RP_ID", "NAME"); + +ALTER TABLE ONLY "WM_POOL_TO_TRIGGER" + ADD CONSTRAINT "WM_POOL_TO_TRIGGER_pkey" PRIMARY KEY ("POOL_ID", "TRIGGER_ID"); + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "WM_MAPPING_pkey" PRIMARY KEY ("MAPPING_ID"); + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "UNIQUE_WM_MAPPING" UNIQUE ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME"); + +-- +-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID"); + + +-- +-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID"); + + +-- +-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID"); + + +-- +-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID"); + + +-- +-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID"); + + +-- +-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID"); + + +-- +-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID"); + + +-- +-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME"); + + +-- +-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID"); + + +-- +-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID"); + + +-- +-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID"); + + +-- +-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID"); + + +-- +-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID"); + + +-- +-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: 
+-- + +CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID"); + + +-- +-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID"); + + +-- +-- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); + + +-- +-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID"); + + +-- +-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID"); + + +-- +-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID"); + + +-- +-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID"); + + +-- +-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID"); + + +-- +-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID"); + + +-- +-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID"); + + +-- +-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID"); + + +-- +-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID"); + + +-- +-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID"); + + +-- +-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME"); + +-- +-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID"); + +-- +-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID"); + +-- +-- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE 
UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID"); + +-- +-- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID"); + +-- +-- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID"); + +CREATE INDEX "CONSTRAINTS_PARENT_TBLID_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("PARENT_TBL_ID"); + +CREATE INDEX "CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("CONSTRAINT_TYPE"); + +ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES" + ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE; + + +ALTER TABLE ONLY "SKEWED_COL_NAMES" + ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP" + ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + +ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP" + ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE; + +ALTER TABLE ONLY "SKEWED_VALUES" + ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE; + +ALTER TABLE ONLY "SKEWED_VALUES" + ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "BUCKETING_COLS" + ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "COLUMNS_V2" + ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE; + + +-- +-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "DATABASE_PARAMS" + ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE; + + +-- +-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "DB_PRIVS" + ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE; + + +-- +-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "IDXS" + ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "IDXS" + ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "IDXS" + ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "INDEX_PARAMS" + ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE; + + +-- +-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE 
ONLY "PARTITIONS" + ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PARTITIONS" + ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PARTITION_KEYS" + ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PARTITION_KEY_VALS" + ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; + + +-- +-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PARTITION_PARAMS" + ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; + + +-- +-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PART_COL_PRIVS" + ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; + + +-- +-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PART_PRIVS" + ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; + + +-- +-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "ROLE_MAP" + ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE; + + +-- +-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "SDS" + ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE; + + +-- +-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "SDS" + ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE; + + +-- +-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "SD_PARAMS" + ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "SERDE_PARAMS" + ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE; + + +-- +-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "SORT_COLS" + ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TABLE_PARAMS" + ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TBLS" + ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE; + + +-- +-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: 
public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TBLS" + ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TBL_COL_PRIVS" + ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TBL_PRIVS" + ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TYPE_FIELDS" + ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE; + +-- +-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- +ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- +ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; + +ALTER TABLE "DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "CTLGS" ("NAME"); + +ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID"); + +-- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +ALTER TABLE ONLY "FUNCS" + ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE; + +-- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +ALTER TABLE ONLY "FUNC_RU" + ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE; + +-- Resource plan FK constraints. 
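One detail worth noting about the resource-plan foreign keys that follow: "WM_RESOURCEPLAN"."DEFAULT_POOL_ID" references "WM_POOL", while "WM_POOL"."RP_ID" references back to "WM_RESOURCEPLAN", so the two tables form a reference cycle. Declaring the constraints DEFERRABLE is what makes such a pair insertable at all; a sketch with invented IDs (PostgreSQL checks deferred constraints at COMMIT):

BEGIN;
-- DEFERRABLE defaults to INITIALLY IMMEDIATE, so defer explicitly.
SET CONSTRAINTS ALL DEFERRED;
INSERT INTO "WM_RESOURCEPLAN" ("RP_ID", "NAME", "QUERY_PARALLELISM", "STATUS", "DEFAULT_POOL_ID")
VALUES (2, 'nightly', 2, 'DISABLED', 20); -- pool 20 does not exist yet
INSERT INTO "WM_POOL" ("POOL_ID", "RP_ID", "PATH", "ALLOC_FRACTION", "QUERY_PARALLELISM", "SCHEDULING_POLICY")
VALUES (20, 2, 'root.default', 1.0, 2, NULL);
COMMIT; -- both foreign keys are validated here and pass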
+ +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_RESOURCEPLAN" + ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_TRIGGER" + ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_POOL_TO_TRIGGER" + ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_POOL_TO_TRIGGER" + ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "WM_TRIGGER" ("TRIGGER_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; + +ALTER TABLE ONLY "MV_CREATION_METADATA" + ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID"); + +CREATE INDEX "MV_UNIQUE_TABLE" + ON "MV_CREATION_METADATA" USING btree ("TBL_NAME", "DB_NAME"); + +ALTER TABLE ONLY "MV_TABLES_USED" + ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN KEY ("MV_CREATION_METADATA_ID") REFERENCES "MV_CREATION_METADATA" ("MV_CREATION_METADATA_ID") DEFERRABLE; + +ALTER TABLE ONLY "MV_TABLES_USED" + ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS" ("TBL_ID") DEFERRABLE; + +-- +-- Name: public; Type: ACL; Schema: -; Owner: hiveuser +-- + +REVOKE ALL ON SCHEMA public FROM PUBLIC; +GRANT ALL ON SCHEMA public TO PUBLIC; + +-- +-- PostgreSQL database dump complete +-- + +------------------------------ +-- Transaction and lock tables +------------------------------ +CREATE TABLE TXNS ( + TXN_ID bigint PRIMARY KEY, + TXN_STATE char(1) NOT NULL, + TXN_STARTED bigint NOT NULL, + TXN_LAST_HEARTBEAT bigint NOT NULL, + TXN_USER varchar(128) NOT NULL, + TXN_HOST varchar(128) NOT NULL, + TXN_AGENT_INFO varchar(128), + TXN_META_INFO varchar(128), + TXN_HEARTBEAT_COUNT integer, + TXN_TYPE integer +); + +CREATE TABLE TXN_COMPONENTS ( + TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID), + TC_DATABASE varchar(128) NOT NULL, + TC_TABLE varchar(128), + TC_PARTITION varchar(767) DEFAULT NULL, + TC_OPERATION_TYPE char(1) NOT NULL, + TC_WRITEID bigint +); + +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS USING hash (TC_TXNID); + +CREATE TABLE COMPLETED_TXN_COMPONENTS ( + CTC_TXNID bigint NOT NULL, + CTC_DATABASE varchar(128) NOT NULL, + CTC_TABLE varchar(256), + CTC_PARTITION varchar(767), + CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL, + CTC_WRITEID bigint +); + +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS USING btree (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +CREATE TABLE NEXT_TXN_ID ( + NTXN_NEXT bigint NOT NULL +); +INSERT INTO NEXT_TXN_ID VALUES(1); + +CREATE TABLE HIVE_LOCKS ( + HL_LOCK_EXT_ID bigint NOT NULL, + HL_LOCK_INT_ID bigint NOT NULL, + HL_TXNID bigint NOT NULL, + HL_DB varchar(128) NOT NULL, + HL_TABLE varchar(128), + HL_PARTITION varchar(767) DEFAULT NULL, + HL_LOCK_STATE char(1) NOT NULL, + HL_LOCK_TYPE char(1) NOT NULL, + HL_LAST_HEARTBEAT bigint NOT NULL, + HL_ACQUIRED_AT bigint, + HL_USER varchar(128) NOT NULL, + HL_HOST varchar(128) NOT NULL, + HL_HEARTBEAT_COUNT integer, + HL_AGENT_INFO varchar(128), + HL_BLOCKEDBY_EXT_ID bigint, + HL_BLOCKEDBY_INT_ID bigint, + PRIMARY 
KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID) +); + +CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID); + +CREATE TABLE NEXT_LOCK_ID ( + NL_NEXT bigint NOT NULL +); +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE COMPACTION_QUEUE ( + CQ_ID bigint PRIMARY KEY, + CQ_DATABASE varchar(128) NOT NULL, + CQ_TABLE varchar(128) NOT NULL, + CQ_PARTITION varchar(767), + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_TBLPROPERTIES varchar(2048), + CQ_WORKER_ID varchar(128), + CQ_START bigint, + CQ_RUN_AS varchar(128), + CQ_HIGHEST_WRITE_ID bigint, + CQ_META_INFO bytea, + CQ_HADOOP_JOB_ID varchar(32) +); + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( + NCQ_NEXT bigint NOT NULL +); +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID bigint PRIMARY KEY, + CC_DATABASE varchar(128) NOT NULL, + CC_TABLE varchar(128) NOT NULL, + CC_PARTITION varchar(767), + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_TBLPROPERTIES varchar(2048), + CC_WORKER_ID varchar(128), + CC_START bigint, + CC_END bigint, + CC_RUN_AS varchar(128), + CC_HIGHEST_WRITE_ID bigint, + CC_META_INFO bytea, + CC_HADOOP_JOB_ID varchar(32) +); + +CREATE TABLE AUX_TABLE ( + MT_KEY1 varchar(128) NOT NULL, + MT_KEY2 bigint NOT NULL, + MT_COMMENT varchar(255), + PRIMARY KEY(MT_KEY1, MT_KEY2) +); + +CREATE TABLE WRITE_SET ( + WS_DATABASE varchar(128) NOT NULL, + WS_TABLE varchar(128) NOT NULL, + WS_PARTITION varchar(767), + WS_TXNID bigint NOT NULL, + WS_COMMIT_ID bigint NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +); + +CREATE TABLE TXN_TO_WRITE_ID ( + T2W_TXNID bigint NOT NULL, + T2W_DATABASE varchar(128) NOT NULL, + T2W_TABLE varchar(256) NOT NULL, + T2W_WRITEID bigint NOT NULL +); + +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); + +CREATE TABLE NEXT_WRITE_ID ( + NWI_DATABASE varchar(128) NOT NULL, + NWI_TABLE varchar(256) NOT NULL, + NWI_NEXT bigint NOT NULL +); + +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); + +CREATE TABLE MIN_HISTORY_LEVEL ( + MHL_TXNID bigint NOT NULL, + MHL_MIN_OPEN_TXNID bigint NOT NULL, + PRIMARY KEY(MHL_TXNID) +); + +CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID); + +CREATE TABLE "I_SCHEMA" ( + "SCHEMA_ID" bigint primary key, + "SCHEMA_TYPE" integer not null, + "NAME" varchar(256) unique, + "DB_ID" bigint references "DBS" ("DB_ID"), + "COMPATIBILITY" integer not null, + "VALIDATION_LEVEL" integer not null, + "CAN_EVOLVE" boolean not null, + "SCHEMA_GROUP" varchar(256), + "DESCRIPTION" varchar(4000) +); + +CREATE TABLE "SCHEMA_VERSION" ( + "SCHEMA_VERSION_ID" bigint primary key, + "SCHEMA_ID" bigint references "I_SCHEMA" ("SCHEMA_ID"), + "VERSION" integer not null, + "CREATED_AT" bigint not null, + "CD_ID" bigint references "CDS" ("CD_ID"), + "STATE" integer not null, + "DESCRIPTION" varchar(4000), + "SCHEMA_TEXT" text, + "FINGERPRINT" varchar(256), + "SCHEMA_VERSION_NAME" varchar(256), + "SERDE_ID" bigint references "SERDES" ("SERDE_ID"), + unique ("SCHEMA_ID", "VERSION") +); + +CREATE TABLE REPL_TXN_MAP ( + RTM_REPL_POLICY varchar(256) NOT NULL, + RTM_SRC_TXN_ID bigint NOT NULL, + RTM_TARGET_TXN_ID bigint NOT NULL, + PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID) +); + + +CREATE TABLE RUNTIME_STATS ( + RS_ID bigint primary key, + CREATE_TIME bigint NOT NULL, + WEIGHT bigint NOT NULL, + PAYLOAD bytea +); + 
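TXN_TO_WRITE_ID and NEXT_WRITE_ID above together implement per-table write-id allocation for ACID tables: NEXT_WRITE_ID holds the next id to hand out for each (database, table) pair, and TXN_TO_WRITE_ID records which open transaction received which id, with the two unique indexes ruling out double allocation in either direction. A rough sketch of the bookkeeping these tables support (all values invented; this is not the metastore's actual allocation code):

-- Assume NEXT_WRITE_ID currently holds NWI_NEXT = 5 for db1.t1.
INSERT INTO TXN_TO_WRITE_ID (T2W_TXNID, T2W_DATABASE, T2W_TABLE, T2W_WRITEID)
VALUES (7, 'db1', 't1', 5);          -- open txn 7 is assigned write id 5
UPDATE NEXT_WRITE_ID
   SET NWI_NEXT = NWI_NEXT + 1       -- the next caller will be handed 6
 WHERE NWI_DATABASE = 'db1' AND NWI_TABLE = 't1';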
+CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME); + + +-- ----------------------------------------------------------------- +-- Record schema version. Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '4.0.0', 'Hive release version 4.0.0'); diff --git standalone-metastore/src/main/thrift/hive_metastore.thrift standalone-metastore/src/main/thrift/hive_metastore.thrift index 19d4433078..d78776bd00 100644 --- standalone-metastore/src/main/thrift/hive_metastore.thrift +++ standalone-metastore/src/main/thrift/hive_metastore.thrift @@ -401,8 +401,7 @@ struct StorageDescriptor { 8: list<string> bucketCols, // reducer grouping columns and clustering columns and bucketing columns 9: list<Order> sortCols, // sort order of the data in each bucket 10: map<string, string> parameters, // any user supplied key value hash - 11: optional SkewedInfo skewedInfo, // skewed information - 12: optional bool storedAsSubDirectories // stored as subdirectories or not + 11: optional SkewedInfo skewedInfo } // table information diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index cb32236d54..d8c2e8cd70 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -1857,7 +1857,6 @@ public void testComplexTable() throws Exception { assertEquals(tbl2.getTableName(), tblName); assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size()); assertFalse(tbl2.getSd().isCompressed()); - assertFalse(tbl2.getSd().isStoredAsSubDirectories()); assertEquals(tbl2.getSd().getNumBuckets(), 1); assertEquals("Use this for comments etc", tbl2.getSd().getParameters() diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java index 62ed380dfc..ee088f5f68 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java @@ -52,14 +52,11 @@ import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector; import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; -import jline.internal.Log; - import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; @Category(MetastoreCheckinTest.class) @@ -1054,7 +1051,6 @@ private Table createTestTbl(String dbName, String tblName, String tblOwner, SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", new HashMap<>()); StorageDescriptor sd = new StorageDescriptor(cols, serdeLocation, "input", "output", false, 0, serdeInfo, null, null, serdeParams); - sd.setStoredAsSubDirectories(false); Table tbl = new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, ptnCols, tblParams, null, null, TableType.MANAGED_TABLE.toString()); tbl.setCatName(DEFAULT_CATALOG_NAME); diff --git 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java index 88064d920f..d835dbe406 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java @@ -1440,7 +1440,6 @@ private Table createTable(String dbName, String tableName, List<FieldSchema> par .setPartCols(partCols) .addStorageDescriptorParam("partTestSDParamKey", "partTestSDParamValue") .setSerdeName(tableName) - .setStoredAsSubDirectories(false) .addSerdeParam("partTestSerdeParamKey", "partTestSerdeParamValue") .setLocation(location) .create(client, metaStore.getConf()); @@ -1580,8 +1579,6 @@ private void verifyPartitionAttributesDefaultValues(Partition partition, String "org.apache.hadoop.hive.ql.io.HiveOutputFormat", sd.getOutputFormat()); Assert.assertFalse("The compressed attribute doesn't have the default value.", sd.isCompressed()); - Assert.assertFalse("The storedAsSubDirectories attribute doesn't have the default value.", - sd.isStoredAsSubDirectories()); Assert.assertEquals("The numBuckets attribute doesn't have the default value.", 0, sd.getNumBuckets()); Assert.assertTrue("The default value of the attribute 'bucketCols' should be an empty list.", diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java index debcd0eee9..fd87b463f6 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java @@ -916,7 +916,6 @@ private Table createTable(String dbName, String tableName, List<FieldSchema> par .setPartCols(partCols) .addStorageDescriptorParam("partTestSDParamKey", "partTestSDParamValue") .setSerdeName(tableName) - .setStoredAsSubDirectories(false) .addSerdeParam("partTestSerdeParamKey", "partTestSerdeParamValue") .setLocation(location) .create(client, metaStore.getConf()); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java index be9e7c94c4..9ccd71fda7 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java @@ -298,7 +298,6 @@ public void testCreateTableDefaultValues() throws Exception { Assert.assertEquals("Storage descriptor bucket cols", 0, createdSd.getBucketCols().size()); Assert.assertEquals("Storage descriptor sort cols", 0, createdSd.getSortCols().size()); Assert.assertEquals("Storage descriptor parameters", 0, createdSd.getParameters().size()); - Assert.assertFalse("Storage descriptor stored as subdir", createdSd.isStoredAsSubDirectories()); // Serde info SerDeInfo serDeInfo = createdSd.getSerdeInfo(); @@ -1359,7 +1358,6 @@ private Table getTableWithAllParametersSet() throws MetaException { .setViewOriginalText("viewOriginalText") .setSerdeLib("serdelib") .setSerdeName("serdename") - .setStoredAsSubDirectories(true) .addSerdeParam("serdeParam", 
"serdeParamValue") .addTableParam("tableParam", "tableParamValue") .addStorageDescriptorParam("sdParam", "sdParamValue") diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java index 9e425cff06..75722878e6 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java @@ -341,10 +341,10 @@ public void testValidateLocations() throws Exception { "insert into CTLGS values (1, 'mycat', 'mydescription', 'hdfs://myhost.com:8020/user/hive/warehouse');", "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role', 'mycat');", "insert into DBS values(7, 'db with bad port', 'hdfs://myhost.com:8020/', 'haDB', 'public', 'role', 'mycat');", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3,null,'org.apache.hadoop.mapred.TextInputFormat','N','N',null,-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", - "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3,null,'org.apache.hadoop.mapred.TextInputFormat','N',null,-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", + "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4000,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');", "insert 
@@ -371,17 +371,17 @@ public void testValidateLocations() throws Exception {
 "insert into DBS values(4, 'my db2', 'hdfs://myhost.com:8020', '', 'public', 'role', 'mycat');",
 "insert into DBS values(6, 'db with bad port', 'hdfs://myhost.com:8020:', 'zDB', 'public', 'role', 'mycat');",
 "insert into DBS values(7, 'db with bad port', 'hdfs://mynameservice.com/', 'haDB', 'public', 'role', 'mycat');",
- "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
- "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','file:///user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+ "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+ "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','file:///user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
 "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');",
 "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(1, 1441402388,0, 'd1=1/d2=1',2,2);",
- "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
- "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
- "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4001,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
- "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4003,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
- "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4004,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
- "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4002,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
- "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (5000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','file:///user/admin/2016_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+ "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3000,null,'org.apache.hadoop.mapred.TextInputFormat','N','yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+ "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4000,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+ "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4001,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+ "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4003,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+ "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4004,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+ "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4002,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+ "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (5000,null,'org.apache.hadoop.mapred.TextInputFormat','N','file:///user/admin/2016_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
 "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (3000 ,1435255431,2,0 ,'hive',0,3000,'mytal3000','MANAGED_TABLE',NULL,NULL,'n');",
 "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (4011 ,1435255431,4,0 ,'hive',0,4001,'mytal4011','MANAGED_TABLE',NULL,NULL,'n');",
 "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (4012 ,1435255431,4,0 ,'hive',0,4002,'','MANAGED_TABLE',NULL,NULL,'n');",
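
The rows seeded in this hunk deliberately mix well-formed and malformed LOCATION values: 'yourhost.com:8020/...' lacks a filesystem scheme, one row has a null LOCATION, and others use file:// or a bare 'hdfs://myhost.com:8020'. A toy version of the kind of scheme check testValidateLocations exercises (illustrative only; the real validation lives in the metastore schema tool, not in this test):

    import java.net.URI;

    // Toy check, not the schema tool's implementation.
    final class LocationCheck {
      static boolean looksValid(String location) {
        if (location == null || location.isEmpty()) {
          return false; // e.g. the seeded SDS row whose LOCATION is null
        }
        String scheme = URI.create(location).getScheme();
        // 'yourhost.com:8020/user/hive/warehouse/mydb' parses with the bogus
        // scheme "yourhost.com", so restricting to known schemes flags it too.
        return "hdfs".equals(scheme) || "file".equals(scheme);
      }

      public static void main(String[] args) {
        System.out.println(looksValid("hdfs://myhost.com:8020/"));                    // true
        System.out.println(looksValid("yourhost.com:8020/user/hive/warehouse/mydb")); // false
      }
    }
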
@@ -469,8 +469,8 @@ private void createTestHiveTableSchemas() throws IOException {
 String[] scripts = new String[] {
 "insert into CTLGS values (1, 'mycat', 'my description', 'hdfs://myhost.com:8020/user/hive/warehouse');",
 "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role', 'mycat');",
- "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
- "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+ "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+ "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
 "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');",
 "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (3 ,1435255431,2,0 ,'hive',0,2,'aTable','MANAGED_TABLE',NULL,NULL,'n');",
 "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(1, 1441402388,0, 'd1=1/d2=1',2,2);"
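
Finally, a sketch of applying a seed array like the ones above in one pass (hypothetical helper; the actual test class drives these statements through its own runner, and note that JDBC drivers such as Derby's reject the trailing semicolons the strings carry):

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    // Hypothetical helper, for illustration: assumes `conn` points at a test
    // metastore database whose schema already exists.
    final class ScriptRunner {
      static void run(Connection conn, String[] scripts) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
          for (String sql : scripts) {
            String trimmed = sql.trim();
            if (trimmed.endsWith(";")) {
              // strip the statement terminator before handing it to JDBC
              trimmed = trimmed.substring(0, trimmed.length() - 1);
            }
            stmt.execute(trimmed);
          }
        }
      }
    }
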