Index: conf/hive-default.xml.template
===================================================================
--- conf/hive-default.xml.template (revision 1242898)
+++ conf/hive-default.xml.template (working copy)
@@ -23,7 +23,7 @@
-
+
  <name>mapred.reduce.tasks</name>
@@ -998,45 +998,45 @@
<property>
  <name>hive.security.authorization.manager</name>
  <value>org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider</value>
  <description>the hive client authorization manager class name.
-  The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.
+  The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.
  </description>
</property>
<property>
  <name>hive.security.authenticator.manager</name>
  <value>org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator</value>
-  <description>hive client authenticator manager class name.
+  <description>hive client authenticator manager class name.
  The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
  </description>
</property>
<property>
  <name>hive.security.authorization.createtable.user.grants</name>
  <value></value>
-  <description>the privileges automatically granted to some users whenever a table gets created.
-  An example like "userX,userY:select;userZ:create" will grant select privilege to userX and userY,
+  <description>the privileges automatically granted to some users whenever a table gets created.
+  An example like "userX,userY:select;userZ:create" will grant select privilege to userX and userY,
  and grant create privilege to userZ whenever a new table is created.
  </description>
</property>
<property>
  <name>hive.security.authorization.createtable.group.grants</name>
  <value></value>
-  <description>the privileges automatically granted to some groups whenever a table gets created.
-  An example like "groupX,groupY:select;groupZ:create" will grant select privilege to groupX and groupY,
+  <description>the privileges automatically granted to some groups whenever a table gets created.
+  An example like "groupX,groupY:select;groupZ:create" will grant select privilege to groupX and groupY,
  and grant create privilege to groupZ whenever a new table is created.
  </description>
</property>
<property>
  <name>hive.security.authorization.createtable.role.grants</name>
  <value></value>
-  <description>the privileges automatically granted to some roles whenever a table gets created.
-  An example like "roleX,roleY:select;roleZ:create" will grant select privilege to roleX and roleY,
+  <description>the privileges automatically granted to some roles whenever a table gets created.
+  An example like "roleX,roleY:select;roleZ:create" will grant select privilege to roleX and roleY,
  and grant create privilege to roleZ whenever a new table is created.
  </description>
</property>
<property>
  <name>hive.security.authorization.createtable.owner.grants</name>
  <value></value>
-  <description>the privileges automatically granted to the owner whenever a table gets created.
+  <description>the privileges automatically granted to the owner whenever a table gets created.
  An example like "select,drop" will grant select and drop privilege to the owner of the table.
  </description>
</property>
@@ -1044,7 +1044,7 @@
<property>
  <name>hive.metastore.authorization.storage.checks</name>
  <value>false</value>
  <description>Should the metastore do authorization checks against the underlying storage
-  for operations like drop-partition (disallow the drop-partition if the user in
+  for operations like drop-partition (disallow the drop-partition if the user in
  question doesn't have permissions to delete the corresponding directory
  on the storage).</description>
</property>
@@ -1058,7 +1058,7 @@
<property>
  <name>hive.index.compact.file.ignore.hdfs</name>
  <value>false</value>
-  <description>True the hdfs location stored in the index file will be igbored at runtime.
+  <description>If true, the hdfs location stored in the index file will be ignored at runtime.
  If the data got moved or the name of the cluster got changed, the index data should still be usable.
  </description>
</property>
@@ -1102,7 +1102,7 @@
<property>
  <name>hive.lock.mapred.only.operation</name>
  <value>false</value>
-  <description>This param is to control whether or not only do lock on queries
+  <description>This param controls whether locks are acquired only for queries
  that need to execute at least one mapred job.
  </description>
</property>
@@ -1136,7 +1136,7 @@
<property>
  <name>hive.rework.mapredwork</name>
  <value>false</value>
-  <description>should rework the mapred work or not.
+  <description>Whether to rework the mapred work or not.
  This is first introduced by SymlinkTextInputFormat to replace symlink files with real paths at compile time.
  </description>
</property>
@@ -1144,9 +1144,9 @@
<property>
  <name>hive.exec.concatenate.check.index</name>
  <value>true</value>
  <description>If this is set to true, hive will throw an error when doing
-  'alter table tbl_name [partSpec] concatenate' on a table/partition
-  that has indexes on it. The reason the user want to set this to true
-  is because it can help user to avoid handling all index drop, recreation,
+  'alter table tbl_name [partSpec] concatenate' on a table/partition
+  that has indexes on it. The reason the user wants to set this to true
+  is that it helps the user avoid handling all the index drop, recreation,
  rebuild work. This is very helpful for tables with thousands of partitions.
  </description>
</property>
@@ -1161,14 +1161,14 @@
<property>
  <name>hive.io.exception.handlers</name>
  <value></value>
  <description>A list of io exception handler class names. This is used
-  to construct a list exception handlers to handle exceptions thrown
+  to construct a list of exception handlers to handle exceptions thrown
  by record readers</description>
</property>
<property>
  <name>hive.autogen.columnalias.prefix.label</name>
  <value>_c</value>
-  <description>String used as a prefix when auto generating column alias.
+  <description>String used as a prefix when auto generating column aliases.
  By default the prefix label will be appended with a column position number to form the column alias. Auto generation would happen if an aggregate function is used in a select clause without an explicit alias.
  </description>
</property>
@@ -1199,7 +1199,7 @@
<property>
  <name>hive.insert.into.multilevel.dirs</name>
  <value>false</value>
-  <description>Where to insert into multilevel directories like
+  <description>Whether to insert into multilevel directories like
  "insert directory '/HIVEFT25686/chinna/' from table"</description>
</property>
@@ -1209,4 +1209,22 @@
  <description>The dfs.umask value for the hive created folders</description>
</property>
+<property>
+  <name>hive.use.default.cluster</name>
+  <value>true</value>
+  <description>Use the default cluster</description>
+</property>
+
+<property>
+  <name>hive.default.cluster.name</name>
+  <value>default</value>
+  <description>The default cluster name</description>
+</property>
+
+<property>
+  <name>hive.cluster.properties</name>
+  <value></value>
+  <description>The default filesystem and jobtracker for a cluster</description>
+</property>
+
Index: metastore/scripts/upgrade/derby/010-HIVE-2612.derby.sql
===================================================================
--- metastore/scripts/upgrade/derby/010-HIVE-2612.derby.sql (revision 0)
+++ metastore/scripts/upgrade/derby/010-HIVE-2612.derby.sql (revision 0)
@@ -0,0 +1,27 @@
+/*
+ * HIVE-2612 support hive table/partitions exists in more than one region
+ */
+
+/*
+ * Creates the following table:
+ * - REGION_SDS
+ */
+CREATE TABLE "REGION_SDS" (
+ "SD_ID" bigint NOT NULL,
+ "REGION_NAME" varchar(512) NOT NULL,
+ "LOCATION" varchar(4000),
+ PRIMARY KEY ("SD_ID", "REGION_NAME")
+);
+
+ALTER TABLE "REGION_SDS"
+ ADD CONSTRAINT "REGION_SDS_FK1"
+ FOREIGN KEY ("SD_ID") REFERENCES "SDS" ("SD_ID")
+ ON DELETE NO ACTION ON UPDATE NO ACTION
+;
+
+/* Alter the SDS table to:
+ * - add the column PRIMARY_REGION_NAME
+ */
+ALTER TABLE SDS
+ ADD COLUMN PRIMARY_REGION_NAME varchar(512) NOT NULL DEFAULT ''
+;
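
A quick sanity check after the script runs, assuming ij is still connected to
the metastore database:

    ij> DESCRIBE REGION_SDS;
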
Index: metastore/scripts/upgrade/derby/upgrade-0.9.0-to-0.10.0.derby.sql
===================================================================
--- metastore/scripts/upgrade/derby/upgrade-0.9.0-to-0.10.0.derby.sql (revision 0)
+++ metastore/scripts/upgrade/derby/upgrade-0.9.0-to-0.10.0.derby.sql (revision 0)
@@ -0,0 +1,2 @@
+-- Upgrade MetaStore schema from 0.9.0 to 0.10.0
+RUN '010-HIVE-2612.derby.sql';
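
A minimal sketch of applying the Derby upgrade with the ij tool, assuming the
metastore database lives at ./metastore_db and ij is started from this scripts
directory so the relative path in RUN resolves:

    ij> CONNECT 'jdbc:derby:metastore_db';
    ij> RUN 'upgrade-0.9.0-to-0.10.0.derby.sql';
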
Index: metastore/scripts/upgrade/derby/hive-schema-0.10.0.derby.sql
===================================================================
--- metastore/scripts/upgrade/derby/hive-schema-0.10.0.derby.sql (revision 0)
+++ metastore/scripts/upgrade/derby/hive-schema-0.10.0.derby.sql (revision 0)
@@ -0,0 +1,251 @@
+-- Timestamp: 2012-02-10 12:21:25.771
+-- Source database is: /home/kevinwilfong/hive/mdb
+-- Connection URL is: jdbc:derby:/home/kevinwilfong/hive/mdb
+-- appendLogs: false
+
+-- ----------------------------------------------
+-- DDL Statements for functions
+-- ----------------------------------------------
+
+CREATE FUNCTION "APP"."NUCLEUS_ASCII" ("C" CHAR(1)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ;
+
+CREATE FUNCTION "APP"."NUCLEUS_MATCHES" ("TEXT" VARCHAR(8000),"PATTERN" VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ;
+
+-- ----------------------------------------------
+-- DDL Statements for tables
+-- ----------------------------------------------
+
+CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), "IS_COMPRESSED" CHAR(1) NOT NULL, "LOCATION" VARCHAR(4000), "NUM_BUCKETS" INTEGER NOT NULL, "OUTPUT_FORMAT" VARCHAR(4000), "SERDE_ID" BIGINT, "CD_ID" BIGINT, "PRIMARY_REGION_NAME" VARCHAR(512) NOT NULL DEFAULT '');
+
+CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "ROLE_ID" BIGINT);
+
+CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
+
+CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(128), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT);
+
+CREATE TABLE "APP"."NUCLEUS_TABLES" ("CLASS_NAME" VARCHAR(128) NOT NULL, "TABLE_NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(4) NOT NULL, "OWNER" VARCHAR(2) NOT NULL, "VERSION" VARCHAR(20) NOT NULL, "INTERFACE_NAME" VARCHAR(256) DEFAULT NULL);
+
+CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000));
+
+CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "USER_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT);
+
+CREATE TABLE "APP"."REGION_SDS" ("SD_ID" BIGINT NOT NULL, "REGION_NAME" VARCHAR(512) NOT NULL, "LOCATION" VARCHAR(4000));
+
+CREATE TABLE "APP"."BUCKETING_COLS" ("SD_ID" BIGINT NOT NULL, "BUCKET_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(128), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."PARTITION_EVENTS" ("PART_NAME_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_TIME" BIGINT NOT NULL, "EVENT_TYPE" INTEGER NOT NULL, "PARTITION_NAME" VARCHAR(767), "TBL_NAME" VARCHAR(128));
+
+CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(128), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR);
+
+CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
+
+CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(128), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."TYPE_FIELDS" ("TYPE_NAME" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "FIELD_NAME" VARCHAR(128) NOT NULL, "FIELD_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DEFERRED_REBUILD" CHAR(1) NOT NULL, "INDEX_HANDLER_CLASS" VARCHAR(4000), "INDEX_NAME" VARCHAR(128), "INDEX_TBL_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "ORIG_TBL_ID" BIGINT, "SD_ID" BIGINT);
+
+CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" BIGINT NOT NULL, "PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."TYPES" ("TYPES_ID" BIGINT NOT NULL, "TYPE_NAME" VARCHAR(128), "TYPE1" VARCHAR(767), "TYPE2" VARCHAR(767));
+
+CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."DBS" ("DB_ID" BIGINT NOT NULL, "DESC" VARCHAR(4000), "DB_LOCATION_URI" VARCHAR(4000) NOT NULL, "NAME" VARCHAR(128));
+
+CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+-- ----------------------------------------------
+-- DDL Statements for indexes
+-- ----------------------------------------------
+
+CREATE UNIQUE INDEX "APP"."GLOBALPRIVILEGEINDEX" ON "APP"."GLOBAL_PRIVS" ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("PART_NAME", "TBL_ID");
+
+CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."UNIQUEINDEX" ON "APP"."IDXS" ("INDEX_NAME", "ORIG_TBL_ID");
+
+CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME");
+
+CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME");
+
+CREATE UNIQUE INDEX "APP"."UNIQUE_TYPE" ON "APP"."TYPES" ("TYPE_NAME");
+
+CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID");
+
+CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+-- ----------------------------------------------
+-- DDL Statements for keys
+-- ----------------------------------------------
+
+-- primary/unique
+ALTER TABLE "APP"."GLOBAL_PRIVS" ADD CONSTRAINT "GLOBAL_PRIVS_PK" PRIMARY KEY ("USER_GRANT_ID");
+
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_PK" PRIMARY KEY ("SD_ID");
+
+ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_PK" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."REGION_SDS" ADD CONSTRAINT "SQL120210122103870" PRIMARY KEY ("SD_ID", "REGION_NAME");
+
+ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_PK" PRIMARY KEY ("PART_ID");
+
+ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_PK" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+
+ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."CDS" ADD CONSTRAINT "SQL110922153006460" PRIMARY KEY ("CD_ID");
+
+ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_PK" PRIMARY KEY ("ROLE_GRANT_ID");
+
+ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEY_PK" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_PK" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_PK" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_PK" PRIMARY KEY ("INDEX_ID");
+
+ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_PK" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."SEQUENCE_TABLE" ADD CONSTRAINT "SEQUENCE_TABLE_PK" PRIMARY KEY ("SEQUENCE_NAME");
+
+ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "SQL110922153006740" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+ALTER TABLE "APP"."SERDES" ADD CONSTRAINT "SERDES_PK" PRIMARY KEY ("SERDE_ID");
+
+ALTER TABLE "APP"."ROLES" ADD CONSTRAINT "ROLES_PK" PRIMARY KEY ("ROLE_ID");
+
+ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_PK" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_PK" PRIMARY KEY ("DB_ID");
+
+ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_PK" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."TYPES" ADD CONSTRAINT "TYPES_PK" PRIMARY KEY ("TYPES_ID");
+
+ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_PK" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_PK" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_PK" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."PARTITION_EVENTS" ADD CONSTRAINT "PARTITION_EVENTS_PK" PRIMARY KEY ("PART_NAME_ID");
+
+ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_PK" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_PK" PRIMARY KEY ("PART_GRANT_ID");
+
+ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_PK" PRIMARY KEY ("TBL_ID");
+
+ALTER TABLE "APP"."NUCLEUS_TABLES" ADD CONSTRAINT "NUCLEUS_TABLES_PK" PRIMARY KEY ("CLASS_NAME");
+
+ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_PK" PRIMARY KEY ("TBL_GRANT_ID");
+
+ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_PK" PRIMARY KEY ("DB_GRANT_ID");
+
+-- foreign
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK2" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."REGION_SDS" ADD CONSTRAINT "REGION_SDS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_FK1" FOREIGN KEY ("ROLE_ID") REFERENCES "APP"."ROLES" ("ROLE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEYS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK3" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "COLUMNS_V2_FK1" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_FK1" FOREIGN KEY ("TYPE_NAME") REFERENCES "APP"."TYPES" ("TYPES_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_FK1" FOREIGN KEY ("INDEX_ID") REFERENCES "APP"."IDXS" ("INDEX_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+-- ----------------------------------------------
+-- DDL Statements for checks
+-- ----------------------------------------------
+
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SQL110318025505550" CHECK (IS_COMPRESSED IN ('Y','N'));
+
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "SQL110318025504980" CHECK (DEFERRED_REBUILD IN ('Y','N'));
+
Index: metastore/scripts/upgrade/mysql/010-HIVE-2612.mysql.sql
===================================================================
--- metastore/scripts/upgrade/mysql/010-HIVE-2612.mysql.sql (revision 0)
+++ metastore/scripts/upgrade/mysql/010-HIVE-2612.mysql.sql (revision 0)
@@ -0,0 +1,59 @@
+SELECT '< HIVE-2612 support hive table/partitions exists in more than one region >' AS ' ';
+
+DELIMITER $$
+DROP PROCEDURE IF EXISTS REVERT $$
+DROP PROCEDURE IF EXISTS ALTER_SDS $$
+DROP PROCEDURE IF EXISTS CREATE_TABLE $$
+DROP PROCEDURE IF EXISTS MIGRATE $$
+
+/* Call this procedure to revert all changes by this script */
+CREATE PROCEDURE REVERT()
+ BEGIN
+ ALTER TABLE SDS
+ DROP COLUMN PRIMARY_REGION_NAME
+ ;
+ DROP TABLE IF EXISTS REGION_SDS;
+
+ END $$
+
+/* Alter the SDS table to:
+ * - add the column PRIMARY_REGION_NAME
+ */
+CREATE PROCEDURE ALTER_SDS()
+ BEGIN
+ ALTER TABLE SDS
+ ADD COLUMN PRIMARY_REGION_NAME varchar(512) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT ''
+ ;
+ END $$
+
+/*
+ * Creates the following table:
+ * - REGION_SDS
+ */
+CREATE PROCEDURE CREATE_TABLE()
+ BEGIN
+ CREATE TABLE IF NOT EXISTS `REGION_SDS` (
+ SD_ID bigint(20) NOT NULL,
+ REGION_NAME varchar(512) NOT NULL,
+ LOCATION varchar(4000),
+ PRIMARY KEY (`SD_ID`, `REGION_NAME`),
+ KEY `REGION_SDS_N49` (`SD_ID`),
+ CONSTRAINT `REGION_SDS_V2_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ ;
+ END $$
+
+/*
+ * Runs the migration by calling the procedures defined above
+ */
+CREATE PROCEDURE MIGRATE()
+ BEGIN
+ call CREATE_TABLE();
+ SELECT 'Created table REGION_SDS';
+ call ALTER_SDS();
+ SELECT 'Altered the SDS table';
+ END $$
+
+DELIMITER ;
+
+CALL MIGRATE();
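
Note that the REVERT procedure remains defined after the script runs, so the
change can be rolled back from the same database later if needed:

    mysql> CALL REVERT();
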
Index: metastore/scripts/upgrade/mysql/upgrade-0.9.0-to-0.10.0.mysql.sql
===================================================================
--- metastore/scripts/upgrade/mysql/upgrade-0.9.0-to-0.10.0.mysql.sql (revision 0)
+++ metastore/scripts/upgrade/mysql/upgrade-0.9.0-to-0.10.0.mysql.sql (revision 0)
@@ -0,0 +1,3 @@
+SELECT 'Upgrading MetaStore schema from 0.9.0 to 0.10.0' AS ' ';
+SOURCE 010-HIVE-2612.mysql.sql;
+SELECT 'Finished upgrading MetaStore schema from 0.9.0 to 0.10.0' AS ' ';
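
A minimal sketch of applying the MySQL upgrade, assuming the mysql client is
started from this scripts directory (so SOURCE can resolve the relative path)
and the metastore database is named metastore:

    mysql> USE metastore;
    mysql> SOURCE upgrade-0.9.0-to-0.10.0.mysql.sql;
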
Index: metastore/scripts/upgrade/mysql/hive-schema-0.10.0.mysql.sql
===================================================================
--- metastore/scripts/upgrade/mysql/hive-schema-0.10.0.mysql.sql (revision 0)
+++ metastore/scripts/upgrade/mysql/hive-schema-0.10.0.mysql.sql (revision 0)
@@ -0,0 +1,655 @@
+-- MySQL dump 10.13 Distrib 5.1.53, for redhat-linux-gnu (x86_64)
+--
+-- Host: cdb259.snc1 Database: test_kevinwilfong_metastore
+-- ------------------------------------------------------
+-- Server version 5.1.47_blackhole_memcache-log
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+DROP TABLE IF EXISTS `BUCKETING_COLS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `BUCKETING_COLS` (
+ `SD_ID` bigint(20) NOT NULL,
+ `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `INTEGER_IDX` int(11) NOT NULL,
+ PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+ KEY `BUCKETING_COLS_N49` (`SD_ID`),
+ CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+DROP TABLE IF EXISTS `CDS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `CDS` (
+ `CD_ID` bigint(20) NOT NULL,
+ PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+DROP TABLE IF EXISTS `COLUMNS_V2`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `COLUMNS_V2` (
+ `CD_ID` bigint(20) NOT NULL,
+ `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TYPE_NAME` varchar(4000) DEFAULT NULL,
+ `INTEGER_IDX` int(11) NOT NULL,
+ PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+ KEY `COLUMNS_V2_N49` (`CD_ID`),
+ CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+DROP TABLE IF EXISTS `DATABASE_PARAMS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `DATABASE_PARAMS` (
+ `DB_ID` bigint(20) NOT NULL,
+ `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+ KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+ CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+DROP TABLE IF EXISTS `DBS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `DBS` (
+ `DB_ID` bigint(20) NOT NULL,
+ `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`DB_ID`),
+ UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+DROP TABLE IF EXISTS `DB_PRIVS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `DB_PRIVS` (
+ `DB_GRANT_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `DB_ID` bigint(20) DEFAULT NULL,
+ `GRANT_OPTION` smallint(6) NOT NULL,
+ `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`DB_GRANT_ID`),
+ UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+ KEY `DB_PRIVS_N49` (`DB_ID`),
+ CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `GLOBAL_PRIVS`
+--
+
+DROP TABLE IF EXISTS `GLOBAL_PRIVS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `GLOBAL_PRIVS` (
+ `USER_GRANT_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `GRANT_OPTION` smallint(6) NOT NULL,
+ `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`USER_GRANT_ID`),
+ UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `IDXS`
+--
+
+DROP TABLE IF EXISTS `IDXS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `IDXS` (
+ `INDEX_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `DEFERRED_REBUILD` bit(1) NOT NULL,
+ `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+ `LAST_ACCESS_TIME` int(11) NOT NULL,
+ `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+ `SD_ID` bigint(20) DEFAULT NULL,
+ PRIMARY KEY (`INDEX_ID`),
+ UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+ KEY `IDXS_N51` (`SD_ID`),
+ KEY `IDXS_N50` (`INDEX_TBL_ID`),
+ KEY `IDXS_N49` (`ORIG_TBL_ID`),
+ CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+ CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+ CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `INDEX_PARAMS`
+--
+
+DROP TABLE IF EXISTS `INDEX_PARAMS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `INDEX_PARAMS` (
+ `INDEX_ID` bigint(20) NOT NULL,
+ `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+ KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+ CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `NUCLEUS_TABLES`
+--
+
+DROP TABLE IF EXISTS `NUCLEUS_TABLES`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `NUCLEUS_TABLES` (
+ `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`CLASS_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITIONS`
+--
+
+DROP TABLE IF EXISTS `PARTITIONS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `PARTITIONS` (
+ `PART_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `LAST_ACCESS_TIME` int(11) NOT NULL,
+ `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `SD_ID` bigint(20) DEFAULT NULL,
+ `TBL_ID` bigint(20) DEFAULT NULL,
+ PRIMARY KEY (`PART_ID`),
+ UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+ KEY `PARTITIONS_N49` (`TBL_ID`),
+ KEY `PARTITIONS_N50` (`SD_ID`),
+ CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+ CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_EVENTS`
+--
+
+DROP TABLE IF EXISTS `PARTITION_EVENTS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `PARTITION_EVENTS` (
+ `PART_NAME_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `EVENT_TIME` bigint(20) NOT NULL,
+ `EVENT_TYPE` int(11) NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`PART_NAME_ID`),
+ KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEYS`
+--
+
+DROP TABLE IF EXISTS `PARTITION_KEYS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `PARTITION_KEYS` (
+ `TBL_ID` bigint(20) NOT NULL,
+ `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `INTEGER_IDX` int(11) NOT NULL,
+ PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+ KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+ CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEY_VALS`
+--
+
+DROP TABLE IF EXISTS `PARTITION_KEY_VALS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `PARTITION_KEY_VALS` (
+ `PART_ID` bigint(20) NOT NULL,
+ `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `INTEGER_IDX` int(11) NOT NULL,
+ PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+ KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+ CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_PARAMS`
+--
+
+DROP TABLE IF EXISTS `PARTITION_PARAMS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `PARTITION_PARAMS` (
+ `PART_ID` bigint(20) NOT NULL,
+ `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+ KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+ CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_COL_PRIVS`
+--
+
+DROP TABLE IF EXISTS `PART_COL_PRIVS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `PART_COL_PRIVS` (
+ `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `GRANT_OPTION` smallint(6) NOT NULL,
+ `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PART_ID` bigint(20) DEFAULT NULL,
+ `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+ KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+ KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+ CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_PRIVS`
+--
+
+DROP TABLE IF EXISTS `PART_PRIVS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `PART_PRIVS` (
+ `PART_GRANT_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `GRANT_OPTION` smallint(6) NOT NULL,
+ `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PART_ID` bigint(20) DEFAULT NULL,
+ `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`PART_GRANT_ID`),
+ KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+ KEY `PART_PRIVS_N49` (`PART_ID`),
+ CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `REGION_SDS`
+--
+
+DROP TABLE IF EXISTS `REGION_SDS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `REGION_SDS` (
+ `SD_ID` bigint(20) NOT NULL,
+ `REGION_NAME` varchar(512) NOT NULL,
+ `LOCATION` varchar(4000) DEFAULT NULL,
+ PRIMARY KEY (`SD_ID`,`REGION_NAME`),
+ KEY `REGION_SDS_N49` (`SD_ID`),
+ CONSTRAINT `REGION_SDS_V2_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLES`
+--
+
+DROP TABLE IF EXISTS `ROLES`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `ROLES` (
+ `ROLE_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`ROLE_ID`),
+ UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLE_MAP`
+--
+
+DROP TABLE IF EXISTS `ROLE_MAP`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `ROLE_MAP` (
+ `ROLE_GRANT_ID` bigint(20) NOT NULL,
+ `ADD_TIME` int(11) NOT NULL,
+ `GRANT_OPTION` smallint(6) NOT NULL,
+ `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `ROLE_ID` bigint(20) DEFAULT NULL,
+ PRIMARY KEY (`ROLE_GRANT_ID`),
+ UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+ KEY `ROLE_MAP_N49` (`ROLE_ID`),
+ CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SDS`
+--
+
+DROP TABLE IF EXISTS `SDS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `SDS` (
+ `SD_ID` bigint(20) NOT NULL,
+ `CD_ID` bigint(20) DEFAULT NULL,
+ `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `IS_COMPRESSED` bit(1) NOT NULL,
+ `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `NUM_BUCKETS` int(11) NOT NULL,
+ `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `SERDE_ID` bigint(20) DEFAULT NULL,
+ `PRIMARY_REGION_NAME` varchar(512) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '',
+ PRIMARY KEY (`SD_ID`),
+ KEY `SDS_N49` (`SERDE_ID`),
+ KEY `SDS_N50` (`CD_ID`),
+ CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+ CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SD_PARAMS`
+--
+
+DROP TABLE IF EXISTS `SD_PARAMS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `SD_PARAMS` (
+ `SD_ID` bigint(20) NOT NULL,
+ `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+ KEY `SD_PARAMS_N49` (`SD_ID`),
+ CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SEQUENCE_TABLE`
+--
+
+DROP TABLE IF EXISTS `SEQUENCE_TABLE`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `SEQUENCE_TABLE` (
+ `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `NEXT_VAL` bigint(20) NOT NULL,
+ PRIMARY KEY (`SEQUENCE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDES`
+--
+
+DROP TABLE IF EXISTS `SERDES`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `SERDES` (
+ `SERDE_ID` bigint(20) NOT NULL,
+ `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDE_PARAMS`
+--
+
+DROP TABLE IF EXISTS `SERDE_PARAMS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `SERDE_PARAMS` (
+ `SERDE_ID` bigint(20) NOT NULL,
+ `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+ KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+ CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SORT_COLS`
+--
+
+DROP TABLE IF EXISTS `SORT_COLS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `SORT_COLS` (
+ `SD_ID` bigint(20) NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `ORDER` int(11) NOT NULL,
+ `INTEGER_IDX` int(11) NOT NULL,
+ PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+ KEY `SORT_COLS_N49` (`SD_ID`),
+ CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TABLE_PARAMS`
+--
+
+DROP TABLE IF EXISTS `TABLE_PARAMS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `TABLE_PARAMS` (
+ `TBL_ID` bigint(20) NOT NULL,
+ `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+ KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+ CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBLS`
+--
+
+DROP TABLE IF EXISTS `TBLS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `TBLS` (
+ `TBL_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `DB_ID` bigint(20) DEFAULT NULL,
+ `LAST_ACCESS_TIME` int(11) NOT NULL,
+ `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `RETENTION` int(11) NOT NULL,
+ `SD_ID` bigint(20) DEFAULT NULL,
+ `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `VIEW_EXPANDED_TEXT` mediumtext,
+ `VIEW_ORIGINAL_TEXT` mediumtext,
+ PRIMARY KEY (`TBL_ID`),
+ UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+ KEY `TBLS_N50` (`SD_ID`),
+ KEY `TBLS_N49` (`DB_ID`),
+ CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+ CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_COL_PRIVS`
+--
+
+DROP TABLE IF EXISTS `TBL_COL_PRIVS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `TBL_COL_PRIVS` (
+ `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `GRANT_OPTION` smallint(6) NOT NULL,
+ `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TBL_ID` bigint(20) DEFAULT NULL,
+ PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+ KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+ KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+ CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_PRIVS`
+--
+
+DROP TABLE IF EXISTS `TBL_PRIVS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `TBL_PRIVS` (
+ `TBL_GRANT_ID` bigint(20) NOT NULL,
+ `CREATE_TIME` int(11) NOT NULL,
+ `GRANT_OPTION` smallint(6) NOT NULL,
+ `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TBL_ID` bigint(20) DEFAULT NULL,
+ PRIMARY KEY (`TBL_GRANT_ID`),
+ KEY `TBL_PRIVS_N49` (`TBL_ID`),
+ KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+ CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPES`
+--
+
+DROP TABLE IF EXISTS `TYPES`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `TYPES` (
+ `TYPES_ID` bigint(20) NOT NULL,
+ `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`TYPES_ID`),
+ UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPE_FIELDS`
+--
+
+DROP TABLE IF EXISTS `TYPE_FIELDS`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `TYPE_FIELDS` (
+ `TYPE_NAME` bigint(20) NOT NULL,
+ `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+ `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `INTEGER_IDX` int(11) NOT NULL,
+ PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+ KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+ CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2012-02-10 12:02:33
Index: metastore/scripts/upgrade/postgres/010-HIVE-2612.postgres.sql
===================================================================
--- metastore/scripts/upgrade/postgres/010-HIVE-2612.postgres.sql (revision 0)
+++ metastore/scripts/upgrade/postgres/010-HIVE-2612.postgres.sql (revision 0)
@@ -0,0 +1,26 @@
+SELECT '< HIVE-2612 support hive table/partitions exists in more than one region >';
+
+--
+-- Table: REGION_SDS
+--
+
+CREATE TABLE "REGION_SDS" (
+ "SD_ID" bigint NOT NULL,
+ "REGION_NAME" character varying(512) NOT NULL,
+ "LOCATION" character varying(4000) DEFAULT NULL,
+ PRIMARY KEY ("SD_ID", "REGION_NAME")
+);
+
+--
+-- Foreign Key Definitions
+--
+
+ALTER TABLE "REGION_SDS" ADD FOREIGN KEY ("SD_ID")
+ REFERENCES "SDS" ("SD_ID") DEFERRABLE;
+
+--
+-- Alter Table: SDS
+--
+
+ALTER TABLE "SDS" ADD COLUMN "PRIMARY_REGION_NAME"
+ character varying(512) NOT NULL DEFAULT '';
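
A minimal sketch of applying the Postgres script with psql, assuming a
metastore database named metastore:

    psql -d metastore -f 010-HIVE-2612.postgres.sql
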
Index: metastore/src/model/package.jdo
===================================================================
--- metastore/src/model/package.jdo (revision 1242898)
+++ metastore/src/model/package.jdo (working copy)
@@ -1,5 +1,5 @@
-
-
+
-
-
-
+
+
+
-
+
@@ -50,15 +50,15 @@
-
-
-
+
+
+
-
+
-
+
@@ -216,6 +216,16 @@
+
+
+
+
+
+
+
+
+
+
@@ -274,6 +284,29 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -319,13 +352,13 @@
-
+
-
+
@@ -386,7 +419,7 @@
-
+
@@ -425,7 +458,7 @@
-
+
@@ -433,7 +466,7 @@
-
+
@@ -462,7 +495,7 @@
-
+
@@ -503,7 +536,7 @@
-
+
@@ -544,7 +577,7 @@
-
+
@@ -585,7 +618,7 @@
-
+
@@ -630,7 +663,7 @@
-
+
@@ -674,17 +707,17 @@
-
-
+
+
-
+
-
-
+
+
Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java
===================================================================
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java (revision 1242898)
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java (working copy)
@@ -20,6 +20,7 @@
import java.util.List;
import java.util.Map;
+import java.util.Set;
public class MStorageDescriptor {
private MColumnDescriptor cd;
@@ -32,6 +33,8 @@
 private List<String> bucketCols;
 private List<MOrder> sortCols;
 private Map<String, String> parameters;
+ private String primaryRegionName;
+ private Set<MRegionStorageDescriptor> secondaryRegions;
public MStorageDescriptor() {}
@@ -47,10 +50,14 @@
* @param bucketCols
* @param sortOrder
* @param parameters
+ * @param primaryRegionName
+ * @param secondaryRegions
*/
public MStorageDescriptor(MColumnDescriptor cd, String location, String inputFormat,
String outputFormat, boolean isCompressed, int numBuckets, MSerDeInfo serDeInfo,
- List<String> bucketCols, List<MOrder> sortOrder, Map<String, String> parameters) {
+ List<String> bucketCols, List<MOrder> sortOrder, Map<String, String> parameters,
+ String primaryRegionName,
+ Set<MRegionStorageDescriptor> secondaryRegions) {
this.cd = cd;
this.location = location;
this.inputFormat = inputFormat;
@@ -61,6 +68,8 @@
this.bucketCols = bucketCols;
this.sortCols = sortOrder;
this.parameters = parameters;
+ this.primaryRegionName = primaryRegionName;
+ this.secondaryRegions = secondaryRegions;
}
@@ -205,4 +214,22 @@
 public List<MOrder> getSortCols() {
return sortCols;
}
+
+ public String getPrimaryRegionName() {
+ return primaryRegionName;
+ }
+
+ public void setPrimaryRegionName(String primaryRegionName) {
+ this.primaryRegionName = primaryRegionName;
+ }
+
+ public Set<MRegionStorageDescriptor> getSecondaryRegions() {
+ return secondaryRegions;
+ }
+
+ public void setSecondaryRegions(
+ Set<MRegionStorageDescriptor> secondaryRegions) {
+ this.secondaryRegions = secondaryRegions;
+ }
+
}
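For illustration, a minimal sketch of how the extended constructor could be
invoked once this patch is applied. The region names, locations, formats and
empty column/parameter values below are hypothetical placeholders, and the
model classes (MStorageDescriptor, MColumnDescriptor, MOrder,
MRegionStorageDescriptor) are assumed to be imported from
org.apache.hadoop.hive.metastore.model:

    // Hypothetical: one primary region plus one secondary replica region.
    Set<MRegionStorageDescriptor> secondary = new HashSet<MRegionStorageDescriptor>();
    secondary.add(new MRegionStorageDescriptor("us-west", "hdfs://west-nn/warehouse/t"));

    MStorageDescriptor sd = new MStorageDescriptor(
        new MColumnDescriptor(), "hdfs://east-nn/warehouse/t",
        "org.apache.hadoop.mapred.TextInputFormat",
        "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",
        false /* isCompressed */, 0 /* numBuckets */, null /* serDeInfo */,
        new ArrayList<String>() /* bucketCols */,
        new ArrayList<MOrder>() /* sortOrder */,
        new HashMap<String, String>() /* parameters */,
        "us-east",   // primaryRegionName
        secondary);  // secondaryRegions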
Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MRegionStorageDescriptor.java
===================================================================
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MRegionStorageDescriptor.java (revision 0)
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MRegionStorageDescriptor.java (revision 0)
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.model;
+
+import java.io.Serializable;
+import java.util.Map;
+
+public class MRegionStorageDescriptor implements Serializable {
+
+ private String regionName;
+ private String location;
+
+ public MRegionStorageDescriptor() {}
+
+ /**
+ * @param regionName
+ * @param location
+ */
+ public MRegionStorageDescriptor(
+ String regionName,
+ String location) {
+ this.regionName = regionName;
+ this.location = location;
+ }
+
+
+ /**
+ * @return region name
+ */
+ public String getRegionName() {
+ return regionName;
+ }
+
+ /**
+ * @param regionName
+ */
+ public void setRegionName(String regionName) {
+ this.regionName = regionName;
+ }
+
+ /**
+ * @return data location stored in this region descriptor
+ */
+ public String getLocation() {
+ return location;
+ }
+
+ /**
+ * @param location the data location stored in this region descriptor
+ */
+ public void setLocation(String location) {
+ this.location = location;
+ }
+}
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java.orig
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java.orig (revision 0)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java.orig (revision 0)
@@ -0,0 +1,3688 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import static org.apache.commons.lang.StringUtils.join;
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_COMMENT;
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName;
+
+import java.io.IOException;
+import java.util.AbstractMap;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Formatter;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.Set;
+import java.util.Timer;
+import java.util.regex.Pattern;
+
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.hive.common.LogUtils;
+import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;
+import org.apache.hadoop.hive.common.classification.InterfaceAudience;
+import org.apache.hadoop.hive.common.classification.InterfaceStability;
+import org.apache.hadoop.hive.common.cli.CommonCliOptions;
+import org.apache.hadoop.hive.common.metrics.Metrics;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
+import org.apache.hadoop.hive.metastore.api.Constants;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+import org.apache.hadoop.hive.metastore.api.HiveObjectType;
+import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.IndexAlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
+import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook;
+import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
+import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
+import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
+import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
+import org.apache.hadoop.hive.metastore.model.MRole;
+import org.apache.hadoop.hive.metastore.model.MRoleMap;
+import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
+import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
+import org.apache.hadoop.hive.serde2.Deserializer;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.SerDeUtils;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
+import org.apache.hadoop.hive.thrift.TUGIContainingTransport;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.TProcessor;
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.server.TServer;
+import org.apache.thrift.server.TThreadPoolServer;
+import org.apache.thrift.transport.TServerSocket;
+import org.apache.thrift.transport.TServerTransport;
+import org.apache.thrift.transport.TTransportFactory;
+
+import com.facebook.fb303.FacebookBase;
+import com.facebook.fb303.fb_status;
+
+/**
+ * TODO:pc remove application logic to a separate interface.
+ */
+public class HiveMetaStore extends ThriftHiveMetastore {
+ public static final Log LOG = LogFactory.getLog(
+ HiveMetaStore.class);
+
+ /**
+ * default port on which to start the Hive server
+ */
+ private static final int DEFAULT_HIVE_METASTORE_PORT = 9083;
+
+ private static HadoopThriftAuthBridge.Server saslServer;
+ private static boolean useSasl;
+
+ public static class HMSHandler extends FacebookBase implements
+ ThriftHiveMetastore.Iface {
+ public static final Log LOG = HiveMetaStore.LOG;
+ private static boolean createDefaultDB = false;
+ private String rawStoreClassName;
+ private final HiveConf hiveConf; // stores datastore (jpox) properties,
+ // right now they come from jpox.properties
+
+ private Warehouse wh; // hdfs warehouse
+ private final ThreadLocal<RawStore> threadLocalMS =
+ new ThreadLocal<RawStore>() {
+ @Override
+ protected synchronized RawStore initialValue() {
+ return null;
+ }
+ };
+
+ // Thread local configuration is needed as many threads could make changes
+ // to the conf using the connection hook
+ private final ThreadLocal<Configuration> threadLocalConf =
+ new ThreadLocal<Configuration>() {
+ @Override
+ protected synchronized Configuration initialValue() {
+ return null;
+ }
+ };
+
+ public static final String AUDIT_FORMAT =
+ "ugi=%s\t" + // ugi
+ "ip=%s\t" + // remote IP
+ "cmd=%s\t"; // command
+ public static final Log auditLog = LogFactory.getLog(
+ HiveMetaStore.class.getName() + ".audit");
+ private static final ThreadLocal<Formatter> auditFormatter =
+ new ThreadLocal<Formatter>() {
+ @Override
+ protected Formatter initialValue() {
+ return new Formatter(new StringBuilder(AUDIT_FORMAT.length() * 4));
+ }
+ };
+
+ private final void logAuditEvent(String cmd) {
+ if (!useSasl || cmd == null) {
+ return;
+ }
+
+ UserGroupInformation ugi;
+ try {
+ ugi = ShimLoader.getHadoopShims().getUGIForConf(getConf());
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ final Formatter fmt = auditFormatter.get();
+ ((StringBuilder)fmt.out()).setLength(0);
+ auditLog.info(fmt.format(AUDIT_FORMAT, ugi.getUserName(),
+ saslServer.getRemoteAddress().toString(), cmd).toString());
+ }
+
+ // The next serial number to be assigned
+ private boolean checkForDefaultDb;
+ private static int nextSerialNum = 0;
+ private static ThreadLocal<Integer> threadLocalId = new ThreadLocal<Integer>() {
+ @Override
+ protected synchronized Integer initialValue() {
+ return new Integer(nextSerialNum++);
+ }
+ };
+
+ // Used for retrying JDO calls on datastore failures
+ private int retryInterval = 0;
+ private int retryLimit = 0;
+ private JDOConnectionURLHook urlHook = null;
+ private String urlHookClassName = "";
+
+ public static Integer get() {
+ return threadLocalId.get();
+ }
+
+ public HMSHandler(String name) throws MetaException {
+ super(name);
+ hiveConf = new HiveConf(this.getClass());
+ init();
+ }
+
+ public HMSHandler(String name, HiveConf conf) throws MetaException {
+ super(name);
+ hiveConf = conf;
+ init();
+ }
+
+ public HiveConf getHiveConf() {
+ return hiveConf;
+ }
+
+ private ClassLoader classLoader;
+ private AlterHandler alterHandler;
+ private List<MetaStoreEventListener> listeners;
+ private List<MetaStoreEndFunctionListener> endFunctionListeners;
+
+ {
+ classLoader = Thread.currentThread().getContextClassLoader();
+ if (classLoader == null) {
+ classLoader = Configuration.class.getClassLoader();
+ }
+ }
+
+ private boolean init() throws MetaException {
+ rawStoreClassName = hiveConf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL);
+ checkForDefaultDb = hiveConf.getBoolean(
+ "hive.metastore.checkForDefaultDb", true);
+ String alterHandlerName = hiveConf.get("hive.metastore.alter.impl",
+ HiveAlterHandler.class.getName());
+ alterHandler = (AlterHandler) ReflectionUtils.newInstance(getClass(
+ alterHandlerName, AlterHandler.class), hiveConf);
+ wh = new Warehouse(hiveConf);
+
+ retryInterval = HiveConf.getIntVar(hiveConf,
+ HiveConf.ConfVars.METASTOREINTERVAL);
+ retryLimit = HiveConf.getIntVar(hiveConf,
+ HiveConf.ConfVars.METASTOREATTEMPTS);
+ // Using the hook on startup ensures that the hook always has priority
+ // over settings in *.xml. We can use hiveConf as only a single thread
+ // will be calling the constructor.
+ updateConnectionURL(hiveConf, null);
+
+ createDefaultDB();
+
+ if (hiveConf.getBoolean("hive.metastore.metrics.enabled", false)) {
+ try {
+ Metrics.init();
+ } catch (Exception e) {
+ // log exception, but ignore inability to start
+ LOG.error("error in Metrics init: " + e.getClass().getName() + " "
+ + e.getMessage());
+ MetaStoreUtils.printStackTrace(e);
+
+ }
+ }
+
+ listeners = MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, hiveConf,
+ hiveConf.getVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS));
+ endFunctionListeners = MetaStoreUtils.getMetaStoreListeners(
+ MetaStoreEndFunctionListener.class, hiveConf,
+ hiveConf.getVar(HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS));
+
+ long cleanFreq = hiveConf.getLongVar(ConfVars.METASTORE_EVENT_CLEAN_FREQ) * 1000L;
+ if(cleanFreq > 0){
+ // In default config, there is no timer.
+ Timer cleaner = new Timer("Metastore Events Cleaner Thread", true);
+ cleaner.schedule(new EventCleanerTask(this), cleanFreq, cleanFreq);
+ }
+ return true;
+ }
+
+ private String addPrefix(String s) {
+ return threadLocalId.get() + ": " + s;
+ }
+
+ /**
+ * A Command is a closure used to pass a block of code from individual
+ * functions to executeWithRetry, which centralizes connection error
+ * handling. Command is parameterized on the return type of the function.
+ *
+ * The general transformation is:
+ *
+ * From:
+ * String foo(int a) throws ExceptionB {
+ *   <block of code>
+ * }
+ *
+ * To:
+ * String foo(final int a) throws ExceptionB {
+ *   String ret = null;
+ *   try {
+ *     ret = executeWithRetry(new Command<String>() {
+ *       String run(RawStore ms) {
+ *         <block of code>
+ *       }
+ *     });
+ *   } catch (ExceptionB e) {
+ *     throw e;
+ *   } catch (Exception e) {
+ *     // Since run is only supposed to throw ExceptionB it could only
+ *     // be a runtime exception
+ *     throw (RuntimeException)e;
+ *   }
+ * }
+ *
+ * The catch blocks are used to ensure that the exceptions thrown by the
+ * <block of code> follow the function definition.
+ */
+ @InterfaceAudience.LimitedPrivate({"HCATALOG"})
+ @InterfaceStability.Evolving
+ public static class Command<T> {
+
+ @InterfaceAudience.LimitedPrivate({"HCATALOG"})
+ @InterfaceStability.Evolving
+ public T run(RawStore ms) throws Exception {
+ return null;
+ }
+ }
+ @InterfaceAudience.LimitedPrivate({"HCATALOG"})
+ @InterfaceStability.Evolving
+ public <T> T executeWithRetry(Command<T> cmd) throws Exception {
+ T ret = null;
+
+ boolean gotNewConnectUrl = false;
+ boolean reloadConf = HiveConf.getBoolVar(hiveConf,
+ HiveConf.ConfVars.METASTOREFORCERELOADCONF);
+
+ if (reloadConf) {
+ updateConnectionURL(getConf(), null);
+ }
+
+ int retryCount = 0;
+ Exception caughtException = null;
+ while(true) {
+ try {
+ RawStore ms = getMS(reloadConf || gotNewConnectUrl);
+ ret = cmd.run(ms);
+ break;
+ } catch (javax.jdo.JDOException e) {
+ caughtException = e;
+ }
+
+ if (retryCount >= retryLimit) {
+ throw caughtException;
+ }
+
+ assert(retryInterval >= 0);
+ retryCount++;
+ LOG.error(
+ String.format(
+ "JDO datastore error. Retrying metastore command " +
+ "after %d ms (attempt %d of %d)", retryInterval, retryCount, retryLimit));
+ Thread.sleep(retryInterval);
+ // If we have a connection error, the JDO connection URL hook might
+ // provide us with a new URL to access the datastore.
+ String lastUrl = getConnectionURL(getConf());
+ gotNewConnectUrl = updateConnectionURL(getConf(), lastUrl);
+ }
+ return ret;
+ }
+
+ private Configuration getConf() {
+ Configuration conf = threadLocalConf.get();
+ if (conf == null) {
+ conf = new Configuration(hiveConf);
+ threadLocalConf.set(conf);
+ }
+ return conf;
+ }
+
+ /**
+ * Get a cached RawStore.
+ *
+ * @return
+ * @throws MetaException
+ */
+ @InterfaceAudience.LimitedPrivate({"HCATALOG"})
+ @InterfaceStability.Evolving
+ public RawStore getMS(boolean reloadConf) throws MetaException {
+ RawStore ms = threadLocalMS.get();
+ if (ms == null) {
+ LOG.info(addPrefix("Opening raw store with implementation class:"
+ + rawStoreClassName));
+ ms = (RawStore) ReflectionUtils.newInstance(getClass(rawStoreClassName,
+ RawStore.class), getConf());
+ threadLocalMS.set(ms);
+ ms = threadLocalMS.get();
+ }
+
+ if (reloadConf) {
+ ms.setConf(getConf());
+ }
+
+ return ms;
+ }
+
+ /**
+ * Updates the connection URL in hiveConf using the hook
+ * @return true if a new connection URL was loaded into the thread local
+ * configuration
+ */
+ private boolean updateConnectionURL(Configuration conf, String badUrl)
+ throws MetaException {
+ String connectUrl = null;
+ String currentUrl = getConnectionURL(conf);
+ try {
+ // We always call init because the hook name in the configuration could
+ // have changed.
+ initConnectionUrlHook();
+ if (urlHook != null) {
+ if (badUrl != null) {
+ urlHook.notifyBadConnectionUrl(badUrl);
+ }
+ connectUrl = urlHook.getJdoConnectionUrl(hiveConf);
+ }
+ } catch (Exception e) {
+ LOG.error("Exception while getting connection URL from the hook: " +
+ e);
+ }
+
+ if (connectUrl != null && !connectUrl.equals(currentUrl)) {
+ LOG.error(addPrefix(
+ String.format("Overriding %s with %s",
+ HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(),
+ connectUrl)));
+ conf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(),
+ connectUrl);
+ return true;
+ }
+ return false;
+ }
+
+ private static String getConnectionURL(Configuration conf) {
+ return conf.get(
+ HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(), "");
+ }
+
+ // Multiple threads could try to initialize at the same time.
+ synchronized private void initConnectionUrlHook()
+ throws ClassNotFoundException {
+
+ String className =
+ hiveConf.get(HiveConf.ConfVars.METASTORECONNECTURLHOOK.toString(), "").trim();
+ if (className.equals("")) {
+ urlHookClassName = "";
+ urlHook = null;
+ return;
+ }
+ boolean urlHookChanged = !urlHookClassName.equals(className);
+ if (urlHook == null || urlHookChanged) {
+ urlHookClassName = className.trim();
+
+ Class<?> urlHookClass = Class.forName(urlHookClassName, true,
+ JavaUtils.getClassLoader());
+ urlHook = (JDOConnectionURLHook) ReflectionUtils.newInstance(urlHookClass, null);
+ }
+ return;
+ }
+
+ private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObjectException {
+ try {
+ ms.getDatabase(DEFAULT_DATABASE_NAME);
+ } catch (NoSuchObjectException e) {
+ ms.createDatabase(
+ new Database(DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT,
+ getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null));
+ }
+ HMSHandler.createDefaultDB = true;
+ }
+ /**
+ * create default database if it doesn't exist
+ *
+ * @throws MetaException
+ */
+ private void createDefaultDB() throws MetaException {
+ if (HMSHandler.createDefaultDB || !checkForDefaultDb) {
+ return;
+ }
+
+ try {
+ executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ createDefaultDB_core(ms);
+ return Boolean.TRUE;
+ }
+ });
+ } catch (InvalidObjectException e) {
+ throw new MetaException(e.getMessage());
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ }
+
+ }
+
+ private Class<?> getClass(String rawStoreClassName, Class<?> class1)
+ throws MetaException {
+ try {
+ return Class.forName(rawStoreClassName, true, classLoader);
+ } catch (ClassNotFoundException e) {
+ throw new MetaException(rawStoreClassName + " class not found");
+ }
+ }
+
+ private void logInfo(String m) {
+ LOG.info(threadLocalId.get().toString() + ": " + m);
+ logAuditEvent(m);
+ }
+
+ public String startFunction(String function, String extraLogInfo) {
+ incrementCounter(function);
+ logInfo(function + extraLogInfo);
+ try {
+ Metrics.startScope(function);
+ } catch (IOException e) {
+ LOG.debug("Exception when starting metrics scope"
+ + e.getClass().getName() + " " + e.getMessage());
+ MetaStoreUtils.printStackTrace(e);
+ }
+ return function;
+ }
+
+ public String startFunction(String function) {
+ return startFunction(function, "");
+ }
+
+ public String startTableFunction(String function, String db, String tbl) {
+ return startFunction(function, " : db=" + db + " tbl=" + tbl);
+ }
+
+ public String startMultiTableFunction(String function, String db, List<String> tbls) {
+ String tableNames = join(tbls, ",");
+ return startFunction(function, " : db=" + db + " tbls=" + tableNames);
+ }
+
+ public String startPartitionFunction(String function, String db, String tbl,
+ List<String> partVals) {
+ return startFunction(function, " : db=" + db + " tbl=" + tbl
+ + "[" + join(partVals, ",") + "]" );
+ }
+
+ public String startPartitionFunction(String function, String db, String tbl,
+ Map<String, String> partName) {
+ return startFunction(function, " : db=" + db + " tbl=" + tbl + " partition=" + partName);
+ }
+
+ public void endFunction(String function, boolean successful) {
+ endFunction(function, new MetaStoreEndFunctionContext(successful));
+ }
+
+ public void endFunction(String function, MetaStoreEndFunctionContext context) {
+ try {
+ Metrics.endScope(function);
+ } catch (IOException e) {
+ LOG.debug("Exception when closing metrics scope" + e);
+ }
+
+ for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
+ listener.onEndFunction(function, context);
+ }
+ }
+
+ @Override
+ public fb_status getStatus() {
+ return fb_status.ALIVE;
+ }
+
+ @Override
+ public void shutdown() {
+ logInfo("Shutting down the object store...");
+ RawStore ms = threadLocalMS.get();
+ if (ms != null) {
+ ms.shutdown();
+ ms = null;
+ }
+ logInfo("Metastore shutdown complete.");
+ }
+
+ @Override
+ public AbstractMap<String, Long> getCounters() {
+ AbstractMap<String, Long> counters = super.getCounters();
+
+ // Allow endFunctionListeners to add any counters they have collected
+ if (endFunctionListeners != null) {
+ for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
+ listener.exportCounters(counters);
+ }
+ }
+
+ return counters;
+ }
+
+ private static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
+
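+ // For example (illustrative): a database named "sales" resolves to
+ // <warehouse-root>/sales.db, while the default database uses the warehouse
+ // root itself.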
+ private Path getDefaultDatabasePath(String dbName) throws MetaException {
+ if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
+ return wh.getWhRoot();
+ }
+ return new Path(wh.getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
+ }
+
+ private void create_database_core(RawStore ms, final Database db)
+ throws AlreadyExistsException, InvalidObjectException, MetaException,
+ IOException {
+ if (!validateName(db.getName())) {
+ throw new InvalidObjectException(db.getName() + " is not a valid database name");
+ }
+ if (null == db.getLocationUri()) {
+ db.setLocationUri(getDefaultDatabasePath(db.getName()).toString());
+ } else {
+ db.setLocationUri(wh.getDnsPath(new Path(db.getLocationUri())).toString());
+ }
+ Path dbPath = new Path(db.getLocationUri());
+ boolean success = false;
+ boolean madeDir = false;
+ try {
+ if (!wh.isDir(dbPath)) {
+ if (!wh.mkdirs(dbPath)) {
+ throw new MetaException("Unable to create database path " + dbPath +
+ ", failed to create database " + db.getName());
+ }
+ madeDir = true;
+ }
+
+ ms.openTransaction();
+ ms.createDatabase(db);
+ success = ms.commitTransaction();
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ if (madeDir) {
+ wh.deleteDir(dbPath, true);
+ }
+ }
+ for (MetaStoreEventListener listener : listeners) {
+ listener.onCreateDatabase(new CreateDatabaseEvent(db, success, this));
+ }
+ }
+ }
+
+ public void create_database(final Database db)
+ throws AlreadyExistsException, InvalidObjectException, MetaException {
+ startFunction("create_database", ": "
+ + db.getName() + " "
+ + db.getLocationUri() + " "
+ + db.getDescription());
+ boolean success = false;
+ try {
+ try {
+ if(null != get_database(db.getName())) {
+ throw new AlreadyExistsException("Database " + db.getName() + " already exists");
+ }
+ } catch (NoSuchObjectException e) {
+ // expected
+ }
+ success = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ create_database_core(ms, db);
+ return Boolean.TRUE;
+ }
+ });
+ } catch (AlreadyExistsException e) {
+ throw e;
+ } catch (InvalidObjectException e) {
+ throw e;
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("create_database", success);
+ }
+ }
+
+ public Database get_database(final String name) throws NoSuchObjectException,
+ MetaException {
+ startFunction("get_database", ": " + name);
+ Database db = null;
+ try {
+ db = executeWithRetry(new Command<Database>() {
+ @Override
+ public Database run(RawStore ms) throws Exception {
+ return ms.getDatabase(name);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_database", db != null);
+ }
+ return db;
+ }
+
+ public void alter_database(final String dbName, final Database db)
+ throws NoSuchObjectException, TException, MetaException {
+ startFunction("alter_database" + dbName);
+ boolean success = false;
+ try {
+ success = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ return ms.alterDatabase(dbName, db);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (TException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException) e;
+ } finally {
+ endFunction("alter_database", success);
+ }
+ }
+
+ private void drop_database_core(RawStore ms,
+ final String name, final boolean deleteData, final boolean cascade)
+ throws NoSuchObjectException, InvalidOperationException, MetaException,
+ IOException {
+ boolean success = false;
+ Database db = null;
+ try {
+ ms.openTransaction();
+ db = ms.getDatabase(name);
+ List<String> allTables = get_all_tables(db.getName());
+ if (!cascade && !allTables.isEmpty()) {
+ throw new InvalidOperationException("Database " + db.getName() + " is not empty");
+ }
+ Path path = new Path(db.getLocationUri()).getParent();
+ if (!wh.isWritable(path)) {
+ throw new MetaException("Database not dropped since " +
+ path + " is not writable by " +
+ hiveConf.getUser());
+ }
+ if (ms.dropDatabase(name)) {
+ success = ms.commitTransaction();
+ }
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ } else if (deleteData) {
+ wh.deleteDir(new Path(db.getLocationUri()), true);
+ // it is not a terrible thing even if the data is not deleted
+ }
+ for (MetaStoreEventListener listener : listeners) {
+ listener.onDropDatabase(new DropDatabaseEvent(db, success, this));
+ }
+ }
+ }
+
+ public void drop_database(final String dbName, final boolean deleteData, final boolean cascade)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+
+ startFunction("drop_database", ": " + dbName);
+ if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(dbName)) {
+ endFunction("drop_database", false);
+ throw new MetaException("Can not drop default database");
+ }
+
+ boolean success = false;
+ try {
+ success = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ drop_database_core(ms, dbName, deleteData, cascade);
+ return Boolean.TRUE;
+ }
+ });
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (InvalidOperationException e) {
+ throw e;
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("drop_database", success);
+ }
+ }
+
+ public List<String> get_databases(final String pattern) throws MetaException {
+ startFunction("get_databases", ": " + pattern);
+
+ List<String> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<String>>() {
+ @Override
+ public List<String> run(RawStore ms) throws Exception {
+ return ms.getDatabases(pattern);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_databases", ret != null);
+ }
+ return ret;
+ }
+
+ public List<String> get_all_databases() throws MetaException {
+ startFunction("get_all_databases");
+
+ List<String> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<String>>() {
+ @Override
+ public List<String> run(RawStore ms) throws Exception {
+ return ms.getAllDatabases();
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_all_databases", ret != null);
+ }
+ return ret;
+ }
+
+ private void create_type_core(final RawStore ms, final Type type)
+ throws AlreadyExistsException, MetaException, InvalidObjectException {
+ if (!MetaStoreUtils.validateName(type.getName())) {
+ throw new InvalidObjectException("Invalid type name");
+ }
+
+ boolean success = false;
+ try {
+ ms.openTransaction();
+ if (is_type_exists(ms, type.getName())) {
+ throw new AlreadyExistsException("Type " + type.getName() + " already exists");
+ }
+ ms.createType(type);
+ success = ms.commitTransaction();
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ }
+ }
+ }
+
+ public boolean create_type(final Type type) throws AlreadyExistsException,
+ MetaException, InvalidObjectException {
+ startFunction("create_type", ": " + type.getName());
+ boolean ret = false;
+ try {
+ ret = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ create_type_core(ms, type);
+ return Boolean.TRUE;
+ }
+ });
+ } catch (AlreadyExistsException e) {
+ throw e;
+ } catch (MetaException e) {
+ throw e;
+ } catch (InvalidObjectException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("create_type", ret);
+ }
+
+ return ret;
+ }
+
+ public Type get_type(final String name) throws MetaException, NoSuchObjectException {
+ startFunction("get_type", ": " + name);
+
+ Type ret = null;
+ try {
+ ret = executeWithRetry(new Command<Type>() {
+ @Override
+ public Type run(RawStore ms) throws Exception {
+ Type type = ms.getType(name);
+ if (null == type) {
+ throw new NoSuchObjectException("Type \"" + name + "\" not found.");
+ }
+ return type;
+ }
+ });
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_type", ret != null);
+ }
+ return ret;
+ }
+
+ private boolean is_type_exists(RawStore ms, String typeName)
+ throws MetaException {
+ return (ms.getType(typeName) != null);
+ }
+
+ private void drop_type_core(final RawStore ms, String typeName)
+ throws NoSuchObjectException, MetaException {
+ boolean success = false;
+ try {
+ ms.openTransaction();
+ // drop any partitions
+ if (!is_type_exists(ms, typeName)) {
+ throw new NoSuchObjectException(typeName + " doesn't exist");
+ }
+ if (!ms.dropType(typeName)) {
+ throw new MetaException("Unable to drop type " + typeName);
+ }
+ success = ms.commitTransaction();
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ }
+ }
+ }
+
+
+ public boolean drop_type(final String name) throws MetaException {
+ startFunction("drop_type", ": " + name);
+
+ boolean ret = false;
+ try {
+ ret = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ // TODO:pc validate that there are no types that refer to this
+ return ms.dropType(name);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("drop_type", ret);
+ }
+ return ret;
+ }
+
+ public Map<String, Type> get_type_all(String name) throws MetaException {
+ // TODO Auto-generated method stub
+ startFunction("get_type_all", ": " + name);
+ endFunction("get_type_all", false);
+ throw new MetaException("Not yet implemented");
+ }
+
+ private void create_table_core(final RawStore ms, final Table tbl)
+ throws AlreadyExistsException, MetaException, InvalidObjectException, NoSuchObjectException {
+
+ if (!MetaStoreUtils.validateName(tbl.getTableName())
+ || !MetaStoreUtils.validateColNames(tbl.getSd().getCols())
+ || (tbl.getPartitionKeys() != null && !MetaStoreUtils
+ .validateColNames(tbl.getPartitionKeys()))) {
+ throw new InvalidObjectException(tbl.getTableName()
+ + " is not a valid object name");
+ }
+
+ Path tblPath = null;
+ boolean success = false, madeDir = false;
+ try {
+ ms.openTransaction();
+
+ if(ms.getDatabase(tbl.getDbName()) == null){
+ throw new NoSuchObjectException("The database " + tbl.getDbName() + " does not exist");
+ }
+
+ // get_table checks whether database exists, it should be moved here
+ if (is_table_exists(ms, tbl.getDbName(), tbl.getTableName())) {
+ throw new AlreadyExistsException("Table " + tbl.getTableName()
+ + " already exists");
+ }
+
+ if (!TableType.VIRTUAL_VIEW.toString().equals(tbl.getTableType())) {
+ if (tbl.getSd().getLocation() == null
+ || tbl.getSd().getLocation().isEmpty()) {
+ tblPath = wh.getTablePath(
+ ms.getDatabase(tbl.getDbName()), tbl.getTableName());
+ } else {
+ if (!isExternal(tbl) && !MetaStoreUtils.isNonNativeTable(tbl)) {
+ LOG.warn("Location: " + tbl.getSd().getLocation()
+ + " specified for non-external table:" + tbl.getTableName());
+ }
+ tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation()));
+ }
+ tbl.getSd().setLocation(tblPath.toString());
+ }
+
+ if (tblPath != null) {
+ if (!wh.isDir(tblPath)) {
+ if (!wh.mkdirs(tblPath)) {
+ throw new MetaException(tblPath
+ + " is not a directory or unable to create one");
+ }
+ madeDir = true;
+ }
+ }
+
+ // set create time
+ long time = System.currentTimeMillis() / 1000;
+ tbl.setCreateTime((int) time);
+ if (tbl.getParameters() == null ||
+ tbl.getParameters().get(Constants.DDL_TIME) == null) {
+ tbl.putToParameters(Constants.DDL_TIME, Long.toString(time));
+ }
+ ms.createTable(tbl);
+ success = ms.commitTransaction();
+
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ if (madeDir) {
+ wh.deleteDir(tblPath, true);
+ }
+ }
+ for (MetaStoreEventListener listener : listeners) {
+ listener.onCreateTable(new CreateTableEvent(tbl, success, this));
+ }
+ }
+ }
+
+ public void create_table(final Table tbl) throws AlreadyExistsException,
+ MetaException, InvalidObjectException {
+ startFunction("create_table", ": db=" + tbl.getDbName() + " tbl="
+ + tbl.getTableName());
+ boolean success = false;
+ try {
+ success = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ create_table_core(ms, tbl);
+ return Boolean.TRUE;
+ }
+ });
+ } catch (AlreadyExistsException e) {
+ throw e;
+ } catch (MetaException e) {
+ throw e;
+ } catch (InvalidObjectException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw new InvalidObjectException(e.getMessage());
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("create_table", success);
+ }
+ }
+
+ private boolean is_table_exists(RawStore ms, String dbname, String name)
+ throws MetaException {
+ return (ms.getTable(dbname, name) != null);
+ }
+
+ private void drop_table_core(final RawStore ms, final String dbname,
+ final String name, final boolean deleteData)
+ throws NoSuchObjectException, MetaException, IOException {
+
+ boolean success = false;
+ boolean isExternal = false;
+ Path tblPath = null;
+ Table tbl = null;
+ isExternal = false;
+ boolean isIndexTable = false;
+ try {
+ ms.openTransaction();
+ // drop any partitions
+ tbl = get_table(dbname, name);
+ if (tbl == null) {
+ throw new NoSuchObjectException(name + " doesn't exist");
+ }
+ if (tbl.getSd() == null) {
+ throw new MetaException("Table metadata is corrupted");
+ }
+
+ isIndexTable = isIndexTable(tbl);
+ if (isIndexTable) {
+ throw new RuntimeException(
+ "The table " + name + " is an index table. Please do drop index instead.");
+ }
+
+ if (!isIndexTable) {
+ try {
+ List<Index> indexes = ms.getIndexes(dbname, name, Short.MAX_VALUE);
+ while(indexes != null && indexes.size()>0) {
+ for (Index idx : indexes) {
+ this.drop_index_by_name(dbname, name, idx.getIndexName(), true);
+ }
+ indexes = ms.getIndexes(dbname, name, Short.MAX_VALUE);
+ }
+ } catch (TException e) {
+ throw new MetaException(e.getMessage());
+ }
+ }
+ isExternal = isExternal(tbl);
+ if (tbl.getSd().getLocation() != null) {
+ tblPath = new Path(tbl.getSd().getLocation());
+ if (!wh.isWritable(tblPath.getParent())) {
+ throw new MetaException("Table metadata not deleted since " +
+ tblPath.getParent() + " is not writable by " +
+ hiveConf.getUser());
+ }
+ }
+
+ if (!ms.dropTable(dbname, name)) {
+ throw new MetaException("Unable to drop table");
+ }
+ success = ms.commitTransaction();
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ } else if (deleteData && (tblPath != null) && !isExternal) {
+ wh.deleteDir(tblPath, true);
+ // ok even if the data is not deleted
+ }
+ for(MetaStoreEventListener listener : listeners){
+ listener.onDropTable(new DropTableEvent(tbl, success, this));
+ }
+ }
+ }
+
+ public void drop_table(final String dbname, final String name, final boolean deleteData)
+ throws NoSuchObjectException, MetaException {
+ startTableFunction("drop_table", dbname, name);
+
+ boolean success = false;
+ try {
+ success = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ drop_table_core(ms, dbname, name, deleteData);
+ return Boolean.TRUE;
+ }
+ });
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("drop_table", success);
+ }
+
+ }
+
+ /**
+ * Is this an external table?
+ *
+ * @param table
+ * Check if this table is external.
+ * @return True if the table is external, otherwise false.
+ */
+ private boolean isExternal(Table table) {
+ return MetaStoreUtils.isExternalTable(table);
+ }
+
+ private boolean isIndexTable (Table table) {
+ return MetaStoreUtils.isIndexTable(table);
+ }
+
+ public Table get_table(final String dbname, final String name) throws MetaException,
+ NoSuchObjectException {
+ Table t = null;
+ startTableFunction("get_table", dbname, name);
+ try {
+ t = executeWithRetry(new Command<Table>() {
+ @Override
+ public Table run(RawStore ms) throws Exception {
+ Table t = ms.getTable(dbname, name);
+ if (t == null) {
+ throw new NoSuchObjectException(dbname + "." + name
+ + " table not found");
+ }
+ return t;
+ }
+ });
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_table", t != null);
+ }
+ return t;
+ }
+
+ /**
+ * Gets multiple tables from the hive metastore.
+ * @param dbname
+ * The name of the database in which the tables reside
+ * @param names
+ * The names of the tables to get.
+ *
+ * @return A list of tables whose names are in the list "names" and
+ * are retrievable from the database specified by "dbname".
+ * There is no guarantee of the order of the returned tables.
+ * If there are duplicate names, only one instance of the table will be returned.
+ * @throws MetaException
+ * @throws InvalidOperationException
+ * @throws UnknownDBException
+ */
+ public List<Table> get_table_objects_by_name(final String dbname, final List<String> names)
+ throws MetaException, InvalidOperationException, UnknownDBException {
+ List<Table> tables = null;
+ startMultiTableFunction("get_multi_table", dbname, names);
+ try {
+ tables = executeWithRetry(new Command<List<Table>>() {
+ @Override
+ public List<Table> run(RawStore ms) throws Exception {
+ if (dbname == null || dbname.isEmpty()) {
+ throw new UnknownDBException("DB name is null or empty");
+ }
+ if (names == null) {
+ throw new InvalidOperationException(dbname + " cannot find null tables");
+ }
+ List<Table> foundTables = ms.getTableObjectsByName(dbname, names);
+ return foundTables;
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (InvalidOperationException e) {
+ throw e;
+ } catch (UnknownDBException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new MetaException(e.toString());
+ } finally {
+ endFunction("get_multi_table", tables != null);
+ }
+ return tables;
+ }
+
+ @Override
+ public List<String> get_table_names_by_filter(
+ final String dbName, final String filter, final short maxTables)
+ throws MetaException, InvalidOperationException, UnknownDBException {
+ List<String> tables = null;
+ startFunction("get_table_names_by_filter", ": db = " + dbName + ", filter = " + filter);
+ try {
+ tables = executeWithRetry(new Command<List<String>>() {
+ @Override
+ public List<String> run(RawStore ms) throws Exception {
+ if (dbName == null || dbName.isEmpty()) {
+ throw new UnknownDBException("DB name is null or empty");
+ }
+ if (filter == null) {
+ throw new InvalidOperationException(filter + " cannot apply null filter");
+ }
+ List<String> tables = ms.listTableNamesByFilter(dbName, filter, maxTables);
+ return tables;
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (InvalidOperationException e) {
+ throw e;
+ } catch (UnknownDBException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new MetaException(e.toString());
+ } finally {
+ endFunction("get_table_names_by_filter", tables != null);
+ }
+ return tables;
+ }
+
+ public boolean set_table_parameters(String dbname, String name,
+ Map<String, String> params) throws NoSuchObjectException, MetaException {
+ endFunction(startTableFunction("set_table_parameters", dbname, name), false);
+ // TODO Auto-generated method stub
+ return false;
+ }
+
+ private Partition append_partition_common(RawStore ms, String dbName, String tableName,
+ List<String> part_vals) throws InvalidObjectException,
+ AlreadyExistsException, MetaException {
+
+ Partition part = new Partition();
+ boolean success = false, madeDir = false;
+ Path partLocation = null;
+ try {
+ ms.openTransaction();
+ part.setDbName(dbName);
+ part.setTableName(tableName);
+ part.setValues(part_vals);
+
+ Table tbl = ms.getTable(part.getDbName(), part.getTableName());
+ if (tbl == null) {
+ throw new InvalidObjectException(
+ "Unable to add partition because table or database do not exist");
+ }
+ if (tbl.getSd().getLocation() == null) {
+ throw new MetaException(
+ "Cannot append a partition to a view");
+ }
+
+ part.setSd(tbl.getSd());
+ partLocation = new Path(tbl.getSd().getLocation(), Warehouse
+ .makePartName(tbl.getPartitionKeys(), part_vals));
+ part.getSd().setLocation(partLocation.toString());
+
+ Partition old_part = null;
+ try {
+ old_part = ms.getPartition(part.getDbName(), part
+ .getTableName(), part.getValues());
+ } catch (NoSuchObjectException e) {
+ // this means there is no existing partition
+ old_part = null;
+ }
+ if (old_part != null) {
+ throw new AlreadyExistsException("Partition already exists:" + part);
+ }
+
+ if (!wh.isDir(partLocation)) {
+ if (!wh.mkdirs(partLocation)) {
+ throw new MetaException(partLocation
+ + " is not a directory or unable to create one");
+ }
+ madeDir = true;
+ }
+
+ // set create time
+ long time = System.currentTimeMillis() / 1000;
+ part.setCreateTime((int) time);
+ part.putToParameters(Constants.DDL_TIME, Long.toString(time));
+
+ success = ms.addPartition(part);
+ if (success) {
+ success = ms.commitTransaction();
+ }
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ if (madeDir) {
+ wh.deleteDir(partLocation, true);
+ }
+ }
+ }
+ return part;
+ }
+
+ public Partition append_partition(final String dbName, final String tableName,
+ final List<String> part_vals) throws InvalidObjectException,
+ AlreadyExistsException, MetaException {
+ startPartitionFunction("append_partition", dbName, tableName, part_vals);
+ if (LOG.isDebugEnabled()) {
+ for (String part : part_vals) {
+ LOG.debug(part);
+ }
+ }
+
+ Partition ret = null;
+ try {
+ ret = executeWithRetry(new Command<Partition>() {
+ @Override
+ public Partition run(RawStore ms) throws Exception {
+ return append_partition_common(ms, dbName, tableName, part_vals);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (InvalidObjectException e) {
+ throw e;
+ } catch (AlreadyExistsException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("append_partition", ret != null);
+ }
+ return ret;
+ }
+
+ private int add_partitions_core(final RawStore ms, final List<Partition> parts)
+ throws MetaException, InvalidObjectException, AlreadyExistsException {
+ String db = parts.get(0).getDbName();
+ String tbl = parts.get(0).getTableName();
+ logInfo("add_partitions : db=" + db + " tbl=" + tbl);
+
+ boolean success = false;
+ Map<Partition, Boolean> addedPartitions = new HashMap<Partition, Boolean>();
+ try {
+ ms.openTransaction();
+ for (Partition part : parts) {
+ Entry<Partition, Boolean> e = add_partition_core_notxn(ms, part);
+ addedPartitions.put(e.getKey(), e.getValue());
+ }
+ success = true;
+ ms.commitTransaction();
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ for (Entry<Partition, Boolean> e : addedPartitions.entrySet()) {
+ if (e.getValue()){
+ wh.deleteDir(new Path(e.getKey().getSd().getLocation()), true);
+ // we just created this directory - it's not a case of pre-creation, so we nuke
+ }
+ }
+ }
+ }
+ return parts.size();
+ }
+
+ public int add_partitions(final List<Partition> parts) throws MetaException,
+ InvalidObjectException, AlreadyExistsException {
+ startFunction("add_partition");
+ if (parts.size() == 0) {
+ return 0;
+ }
+
+ Integer ret = null;
+ try {
+ ret = executeWithRetry(new Command<Integer>() {
+ @Override
+ public Integer run(RawStore ms) throws Exception {
+ int ret = add_partitions_core(ms, parts);
+ return Integer.valueOf(ret);
+ }
+ });
+ } catch (InvalidObjectException e) {
+ throw e;
+ } catch (AlreadyExistsException e) {
+ throw e;
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("add_partition", ret != null);
+ }
+ return ret;
+ }
+
+ /**
+ * An implementation of add_partition_core that does not commit or roll back
+ * the transaction as part of its operation
+ * - it is assumed that this will be tended to from outside this call
+ * @param ms
+ * @param part
+ * @return
+ * @throws InvalidObjectException
+ * @throws AlreadyExistsException
+ * @throws MetaException
+ */
+ private Entry<Partition, Boolean> add_partition_core_notxn(
+ final RawStore ms, final Partition part)
+ throws InvalidObjectException, AlreadyExistsException, MetaException {
+ boolean success = false, madeDir = false;
+ Path partLocation = null;
+ try {
+ Partition old_part = null;
+ try {
+ old_part = ms.getPartition(part.getDbName(), part
+ .getTableName(), part.getValues());
+ } catch(NoSuchObjectException e) {
+ // this means there is no existing partition
+ old_part = null;
+ }
+ if (old_part != null) {
+ throw new AlreadyExistsException("Partition already exists:" + part);
+ }
+ Table tbl = ms.getTable(part.getDbName(), part.getTableName());
+ if (tbl == null) {
+ throw new InvalidObjectException(
+ "Unable to add partition because table or database do not exist");
+ }
+
+ String partLocationStr = null;
+ if (part.getSd() != null) {
+ partLocationStr = part.getSd().getLocation();
+ }
+ if (partLocationStr == null || partLocationStr.isEmpty()) {
+ // set default location if not specified and this is
+ // a physical table partition (not a view)
+ if (tbl.getSd().getLocation() != null) {
+ partLocation = new Path(tbl.getSd().getLocation(), Warehouse
+ .makePartName(tbl.getPartitionKeys(), part.getValues()));
+ }
+
+ } else {
+ if (tbl.getSd().getLocation() == null) {
+ throw new MetaException(
+ "Cannot specify location for a view partition");
+ }
+ partLocation = wh.getDnsPath(new Path(partLocationStr));
+ }
+
+ if (partLocation != null) {
+ part.getSd().setLocation(partLocation.toString());
+
+
+ // Check to see if the directory already exists before calling
+ // mkdirs() because if the file system is read-only, mkdirs will
+ // throw an exception even if the directory already exists.
+ if (!wh.isDir(partLocation)) {
+ if (!wh.mkdirs(partLocation)) {
+ throw new MetaException(partLocation
+ + " is not a directory or unable to create one");
+ }
+ madeDir = true;
+ }
+ }
+
+ // set create time
+ long time = System.currentTimeMillis() / 1000;
+ part.setCreateTime((int) time);
+ if (part.getParameters() == null ||
+ part.getParameters().get(Constants.DDL_TIME) == null) {
+ part.putToParameters(Constants.DDL_TIME, Long.toString(time));
+ }
+
+ // Inherit table properties into partition properties.
+ Map<String, String> tblParams = tbl.getParameters();
+ String inheritProps = hiveConf.getVar(ConfVars.METASTORE_PART_INHERIT_TBL_PROPS).trim();
+ // Default value is the empty string, in which case no properties are inherited.
+ // "*" implies all properties need to be inherited.
+ Set<String> inheritKeys = new HashSet<String>(Arrays.asList(inheritProps.split(",")));
+ if(inheritKeys.contains("*")){
+ inheritKeys = tblParams.keySet();
+ }
+
+ for (String key : inheritKeys) {
+ String paramVal = tblParams.get(key);
+ if(null != paramVal){ // add the property only if it exists in table properties
+ part.putToParameters(key, paramVal);
+ }
+ }
+
+ success = ms.addPartition(part);
+
+ } finally {
+ if (!success) {
+ if (madeDir) {
+ wh.deleteDir(partLocation, true);
+ }
+ }
+ for(MetaStoreEventListener listener : listeners){
+ listener.onAddPartition(new AddPartitionEvent(part, success, this));
+ }
+ }
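+ // Wrap the (partition, madeDir) pair in a single-entry map solely so it
+ // can be returned as a Map.Entry.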
+ Map<Partition, Boolean> returnVal = new HashMap<Partition, Boolean>();
+ returnVal.put(part, madeDir);
+ return returnVal.entrySet().iterator().next();
+ }
+
+ private Partition add_partition_core(final RawStore ms, final Partition part)
+ throws InvalidObjectException, AlreadyExistsException, MetaException {
+ boolean success = false;
+ Partition retPtn = null;
+ try{
+ ms.openTransaction();
+ retPtn = add_partition_core_notxn(ms,part).getKey();
+ // we proceed only if we'd actually succeeded anyway, otherwise,
+ // we'd have thrown an exception
+ success = ms.commitTransaction();
+ }finally{
+ if (!success){
+ ms.rollbackTransaction();
+ }
+ }
+ return retPtn;
+ }
+
+ public Partition add_partition(final Partition part)
+ throws InvalidObjectException, AlreadyExistsException, MetaException {
+ startTableFunction("add_partition", part.getDbName(), part.getTableName());
+
+ Partition ret = null;
+ try {
+ ret = executeWithRetry(new Command<Partition>() {
+ @Override
+ public Partition run(RawStore ms) throws Exception {
+ return add_partition_core(ms, part);
+ }
+ });
+ } catch (InvalidObjectException e) {
+ throw e;
+ } catch (AlreadyExistsException e) {
+ throw e;
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("add_partition", ret != null);
+ }
+ return ret;
+
+ }
+
+ private boolean drop_partition_common(RawStore ms, String db_name, String tbl_name,
+ List<String> part_vals, final boolean deleteData)
+ throws MetaException, NoSuchObjectException, IOException {
+
+ boolean success = false;
+ Path partPath = null;
+ Table tbl = null;
+ Partition part = null;
+ boolean isArchived = false;
+ Path archiveParentDir = null;
+
+ try {
+ ms.openTransaction();
+ part = ms.getPartition(db_name, tbl_name, part_vals);
+
+ if (part == null) {
+ throw new NoSuchObjectException("Partition doesn't exist. "
+ + part_vals);
+ }
+
+ isArchived = MetaStoreUtils.isArchived(part);
+ if (isArchived) {
+ archiveParentDir = MetaStoreUtils.getOriginalLocation(part);
+ if (!wh.isWritable(archiveParentDir.getParent())) {
+ throw new MetaException("Table partition not deleted since " +
+ archiveParentDir.getParent() + " is not writable by " +
+ hiveConf.getUser());
+ }
+ }
+ if (!ms.dropPartition(db_name, tbl_name, part_vals)) {
+ throw new MetaException("Unable to drop partition");
+ }
+ success = ms.commitTransaction();
+ if ((part.getSd() != null) && (part.getSd().getLocation() != null)) {
+ partPath = new Path(part.getSd().getLocation());
+ if (!wh.isWritable(partPath.getParent())) {
+ throw new MetaException("Table partition not deleted since " +
+ partPath.getParent() + " is not writable by " +
+ hiveConf.getUser());
+ }
+ }
+ tbl = get_table(db_name, tbl_name);
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ } else if (deleteData && ((partPath != null) || (archiveParentDir != null))) {
+ if (tbl != null && !isExternal(tbl)) {
+ // Archived partitions have har:/to_har_file as their location.
+ // The original directory was saved in params
+ if (isArchived) {
+ assert(archiveParentDir != null);
+ wh.deleteDir(archiveParentDir, true);
+ } else {
+ assert(partPath != null);
+ wh.deleteDir(partPath, true);
+ }
+ // ok even if the data is not deleted
+ }
+ }
+ for(MetaStoreEventListener listener : listeners){
+ listener.onDropPartition(new DropPartitionEvent(part, success, this));
+ }
+ }
+ return true;
+ }
+
+ public boolean drop_partition(final String db_name, final String tbl_name,
+ final List<String> part_vals, final boolean deleteData)
+ throws NoSuchObjectException, MetaException, TException {
+ startPartitionFunction("drop_partition", db_name, tbl_name, part_vals);
+ LOG.info("Partition values:" + part_vals);
+
+ boolean ret = false;
+ try {
+ ret = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ return Boolean.valueOf(
+ drop_partition_common(ms, db_name, tbl_name, part_vals, deleteData));
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (TException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("drop_partition", ret);
+ }
+ return ret;
+
+ }
+
+ public Partition get_partition(final String db_name, final String tbl_name,
+ final List<String> part_vals) throws MetaException, NoSuchObjectException {
+ startPartitionFunction("get_partition", db_name, tbl_name, part_vals);
+
+ Partition ret = null;
+ try {
+ ret = executeWithRetry(new Command<Partition>() {
+ @Override
+ public Partition run(RawStore ms) throws Exception {
+ return ms.getPartition(db_name, tbl_name, part_vals);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_partition", ret != null);
+ }
+ return ret;
+ }
+
+ @Override
+ public Partition get_partition_with_auth(final String db_name,
+ final String tbl_name, final List<String> part_vals,
+ final String user_name, final List<String> group_names)
+ throws MetaException, NoSuchObjectException, TException {
+ startPartitionFunction("get_partition_with_auth", db_name, tbl_name,
+ part_vals);
+
+ Partition ret = null;
+ try {
+ ret = executeWithRetry(new Command<Partition>() {
+ @Override
+ public Partition run(RawStore ms) throws Exception {
+ return ms.getPartitionWithAuth(db_name, tbl_name, part_vals,
+ user_name, group_names);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (Exception e) {
+ assert (e instanceof RuntimeException);
+ throw (RuntimeException) e;
+ } finally {
+ endFunction("get_partition_with_auth", ret != null);
+ }
+ return ret;
+ }
+
+ public List<Partition> get_partitions(final String db_name, final String tbl_name,
+ final short max_parts) throws NoSuchObjectException, MetaException {
+ startTableFunction("get_partitions", db_name, tbl_name);
+
+ List<Partition> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<Partition>>() {
+ @Override
+ public List<Partition> run(RawStore ms) throws Exception {
+ return ms.getPartitions(db_name, tbl_name, max_parts);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_partitions", ret != null);
+ }
+ return ret;
+
+ }
+
+ @Override
+ public List<Partition> get_partitions_with_auth(final String dbName,
+ final String tblName, final short maxParts, final String userName,
+ final List<String> groupNames) throws NoSuchObjectException,
+ MetaException, TException {
+ startTableFunction("get_partitions_with_auth", dbName, tblName);
+
+ List<Partition> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<Partition>>() {
+ @Override
+ public List<Partition> run(RawStore ms) throws Exception {
+ return ms.getPartitionsWithAuth(dbName, tblName, maxParts,
+ userName, groupNames);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (Exception e) {
+ assert (e instanceof RuntimeException);
+ throw (RuntimeException) e;
+ } finally {
+ endFunction("get_partitions_with_auth", ret != null);
+ }
+ return ret;
+
+ }
+
+ public List<String> get_partition_names(final String db_name, final String tbl_name,
+ final short max_parts) throws MetaException {
+ startTableFunction("get_partition_names", db_name, tbl_name);
+
+ List<String> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<String>>() {
+ @Override
+ public List<String> run(RawStore ms) throws Exception {
+ return ms.listPartitionNames(db_name, tbl_name, max_parts);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_partition_names", ret != null);
+ }
+ return ret;
+ }
+
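+ // alter_partition with no rename: delegates to rename_partition with
+ // null old partition values.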
+ public void alter_partition(final String db_name, final String tbl_name,
+ final Partition new_part)
+ throws InvalidOperationException, MetaException,
+ TException {
+ rename_partition(db_name, tbl_name, null, new_part);
+ }
+
+ public void rename_partition(final String db_name, final String tbl_name,
+ final List<String> part_vals, final Partition new_part)
+ throws InvalidOperationException, MetaException,
+ TException {
+ startTableFunction("alter_partition", db_name, tbl_name);
+ LOG.info("New partition values:" + new_part.getValues());
+ if (part_vals != null && part_vals.size() > 0) {
+ LOG.info("Old Partition values:" + part_vals);
+ }
+
+ Partition oldPart = null;
+ try {
+ oldPart =
+ executeWithRetry(new Command<Partition>() {
+ @Override
+ public Partition run(RawStore ms) throws Exception {
+ return alterHandler.alterPartition(ms, wh, db_name, tbl_name, part_vals, new_part);
+ }
+ });
+ for (MetaStoreEventListener listener : listeners) {
+ listener.onAlterPartition(new AlterPartitionEvent(oldPart, new_part, true, this));
+ }
+ } catch (NoSuchObjectException e) {
+ // old partition does not exist
+ throw new InvalidOperationException("alter is not possible: old partition does not exist");
+ } catch (InvalidObjectException e) {
+ throw new InvalidOperationException(e.getMessage());
+ } catch (AlreadyExistsException e) {
+ throw new InvalidOperationException(e.getMessage());
+ } catch (MetaException e) {
+ throw e;
+ } catch (TException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("alter_partition", oldPart != null);
+ }
+ return;
+ }
+
+
+ public boolean create_index(Index index_def)
+ throws IndexAlreadyExistsException, MetaException {
+ endFunction(startFunction("create_index"), false);
+ // TODO Auto-generated method stub
+ throw new MetaException("Not yet implemented");
+ }
+
+ public void alter_index(final String dbname, final String base_table_name,
+ final String index_name, final Index newIndex)
+ throws InvalidOperationException, MetaException {
+ startFunction("alter_index", ": db=" + dbname + " base_tbl=" + base_table_name
+ + " idx=" + index_name + " newidx=" + newIndex.getIndexName());
+ newIndex.putToParameters(Constants.DDL_TIME, Long.toString(System
+ .currentTimeMillis() / 1000));
+
+ boolean success = false;
+ try {
+ success = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ ms.alterIndex(dbname, base_table_name, index_name, newIndex);
+ return Boolean.TRUE;
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (InvalidOperationException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("alter_index", false);
+ }
+ return;
+ }
+
+ public String getVersion() throws TException {
+ endFunction(startFunction("getVersion"), true);
+ return "3.0";
+ }
+
+ public void alter_table(final String dbname, final String name, final Table newTable)
+ throws InvalidOperationException, MetaException {
+ startFunction("alter_table", ": db=" + dbname + " tbl=" + name
+ + " newtbl=" + newTable.getTableName());
+
+ // Update the time if it hasn't been specified.
+ if (newTable.getParameters() == null ||
+ newTable.getParameters().get(Constants.DDL_TIME) == null) {
+ newTable.putToParameters(Constants.DDL_TIME, Long.toString(System
+ .currentTimeMillis() / 1000));
+ }
+ boolean success = false;
+ try {
+ Table oldt = get_table(dbname, name);
+ success = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ alterHandler.alterTable(ms, wh, dbname, name, newTable);
+ return Boolean.TRUE;
+ }
+ });
+ for (MetaStoreEventListener listener : listeners) {
+ listener.onAlterTable(new AlterTableEvent(oldt, newTable, success, this));
+ }
+ } catch (MetaException e) {
+ throw e;
+ } catch (InvalidOperationException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ //thrown when the table to be altered does not exist
+ throw new InvalidOperationException(e.getMessage());
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("alter_table", success);
+ }
+ }
+
+ public List<String> get_tables(final String dbname, final String pattern)
+ throws MetaException {
+ startFunction("get_tables", ": db=" + dbname + " pat=" + pattern);
+
+ List<String> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<String>>() {
+ @Override
+ public List<String> run(RawStore ms) throws Exception {
+ return ms.getTables(dbname, pattern);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_tables", ret != null);
+ }
+ return ret;
+ }
+
+ public List<String> get_all_tables(final String dbname) throws MetaException {
+ startFunction("get_all_tables", ": db=" + dbname);
+
+ List<String> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<String>>() {
+ @Override
+ public List<String> run(RawStore ms) throws Exception {
+ return ms.getAllTables(dbname);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_all_tables", ret != null);
+ }
+ return ret;
+ }
+
+ public List<FieldSchema> get_fields(String db, String tableName)
+ throws MetaException, UnknownTableException, UnknownDBException {
+ startFunction("get_fields", ": db=" + db + " tbl=" + tableName);
+ String[] names = tableName.split("\\.");
+ String base_table_name = names[0];
+
+ Table tbl;
+ List<FieldSchema> ret = null;
+ try {
+ try {
+ tbl = get_table(db, base_table_name);
+ } catch (NoSuchObjectException e) {
+ throw new UnknownTableException(e.getMessage());
+ }
+ boolean getColsFromSerDe = SerDeUtils.shouldGetColsFromSerDe(
+ tbl.getSd().getSerdeInfo().getSerializationLib());
+ if (!getColsFromSerDe) {
+ ret = tbl.getSd().getCols();
+ } else {
+ try {
+ Deserializer s = MetaStoreUtils.getDeserializer(hiveConf, tbl);
+ ret = MetaStoreUtils.getFieldsFromDeserializer(tableName, s);
+ } catch (SerDeException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ throw new MetaException(e.getMessage());
+ }
+ }
+ } finally {
+ endFunction("get_fields", ret != null);
+ }
+
+ return ret;
+ }
+
+ /**
+ * Return the schema of the table. This function includes partition columns
+ * in addition to the regular columns.
+ *
+ * @param db
+ * Name of the database
+ * @param tableName
+ * Name of the table
+ * @return List of columns, each column is a FieldSchema structure
+ * @throws MetaException
+ * @throws UnknownTableException
+ * @throws UnknownDBException
+ */
+ public List<FieldSchema> get_schema(String db, String tableName)
+ throws MetaException, UnknownTableException, UnknownDBException {
+ startFunction("get_schema", ": db=" + db + " tbl=" + tableName);
+ boolean success = false;
+ try {
+ String[] names = tableName.split("\\.");
+ String base_table_name = names[0];
+
+ Table tbl;
+ try {
+ tbl = get_table(db, base_table_name);
+ } catch (NoSuchObjectException e) {
+ throw new UnknownTableException(e.getMessage());
+ }
+ List<FieldSchema> fieldSchemas = get_fields(db, base_table_name);
+
+ if (tbl == null || fieldSchemas == null) {
+ throw new UnknownTableException(tableName + " doesn't exist");
+ }
+
+ if (tbl.getPartitionKeys() != null) {
+ // Combine the column field schemas and the partition keys to create the
+ // whole schema
+ fieldSchemas.addAll(tbl.getPartitionKeys());
+ }
+ success = true;
+ return fieldSchemas;
+ } finally {
+ endFunction("get_schema", success);
+ }
+ }
+
+ public String getCpuProfile(int profileDurationInSec) throws TException {
+ return "";
+ }
+
+ /**
+ * Returns the value of the given configuration variable. If the variable
+ * doesn't exist, if an exception is thrown while retrieving it, or if
+ * name is null, defaultValue is returned.
+ */
+ public String get_config_value(String name, String defaultValue)
+ throws TException, ConfigValSecurityException {
+ startFunction("get_config_value", ": name=" + name + " defaultValue="
+ + defaultValue);
+ boolean success = false;
+ try {
+ if (name == null) {
+ success = true;
+ return defaultValue;
+ }
+ // Allow only keys that start with hive.*, hdfs.*, mapred.* for security
+ // i.e. don't allow access to db password
+ if (!Pattern.matches("(hive|hdfs|mapred).*", name)) {
+ throw new ConfigValSecurityException("For security reasons, the "
+ + "config key " + name + " cannot be accessed");
+ }
+
+ String toReturn = defaultValue;
+ try {
+ toReturn = hiveConf.get(name, defaultValue);
+ } catch (RuntimeException e) {
+ LOG.error(threadLocalId.get().toString() + ": "
+ + "RuntimeException thrown in get_config_value - msg: "
+ + e.getMessage() + " cause: " + e.getCause());
+ }
+ success = true;
+ return toReturn;
+ } finally {
+ endFunction("get_config_value", success);
+ }
+ }
+
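+ // Converts a partition name such as "ds=2011-01-01/hr=12" into a list of
+ // values ordered to match the table's partition keys.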
+ private List<String> getPartValsFromName(RawStore ms, String dbName, String tblName,
+ String partName) throws MetaException, InvalidObjectException {
+ // Unescape the partition name
+ LinkedHashMap<String, String> hm = Warehouse.makeSpecFromName(partName);
+
+ // getPartition expects partition values in a list. use info from the
+ // table to put the partition column values in order
+ Table t = ms.getTable(dbName, tblName);
+ if (t == null) {
+ throw new InvalidObjectException(dbName + "." + tblName
+ + " table not found");
+ }
+
+ List<String> partVals = new ArrayList<String>();
+ for (FieldSchema field : t.getPartitionKeys()) {
+ String key = field.getName();
+ String val = hm.get(key);
+ if (val == null) {
+ throw new InvalidObjectException("incomplete partition name - missing " + key);
+ }
+ partVals.add(val);
+ }
+ return partVals;
+ }
+
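+ // Resolves a partition by its escaped name; an unparseable or incomplete
+ // name surfaces as NoSuchObjectException rather than InvalidObjectException.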
+ private Partition get_partition_by_name_core(final RawStore ms, final String db_name,
+ final String tbl_name, final String part_name)
+ throws MetaException, NoSuchObjectException, TException {
+ List<String> partVals = null;
+ try {
+ partVals = getPartValsFromName(ms, db_name, tbl_name, part_name);
+ } catch (InvalidObjectException e) {
+ throw new NoSuchObjectException(e.getMessage());
+ }
+ Partition p = ms.getPartition(db_name, tbl_name, partVals);
+
+ if (p == null) {
+ throw new NoSuchObjectException(db_name + "." + tbl_name
+ + " partition (" + part_name + ") not found");
+ }
+ return p;
+ }
+
+ public Partition get_partition_by_name(final String db_name, final String tbl_name,
+ final String part_name) throws MetaException, NoSuchObjectException, TException {
+
+ startFunction("get_partition_by_name", ": db=" + db_name + " tbl="
+ + tbl_name + " part=" + part_name);
+
+ Partition ret = null;
+
+ try {
+ ret = executeWithRetry(new Command<Partition>() {
+ @Override
+ public Partition run(RawStore ms) throws Exception {
+ return get_partition_by_name_core(ms, db_name, tbl_name, part_name);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (TException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_partition_by_name", ret != null);
+ }
+ return ret;
+ }
+
+ public Partition append_partition_by_name(final String db_name, final String tbl_name,
+ final String part_name) throws InvalidObjectException,
+ AlreadyExistsException, MetaException, TException {
+ startFunction("append_partition_by_name", ": db=" + db_name + " tbl="
+ + tbl_name + " part=" + part_name);
+
+ Partition ret = null;
+ try {
+ ret = executeWithRetry(new Command<Partition>() {
+ @Override
+ public Partition run(RawStore ms) throws Exception {
+ List<String> partVals = getPartValsFromName(ms, db_name, tbl_name, part_name);
+ return append_partition_common(ms, db_name, tbl_name, partVals);
+ }
+ });
+ } catch (InvalidObjectException e) {
+ throw e;
+ } catch (AlreadyExistsException e) {
+ throw e;
+ } catch (MetaException e) {
+ throw e;
+ } catch (TException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("append_partition_by_name", ret != null);
+ }
+ return ret;
+ }
+
+ private boolean drop_partition_by_name_core(final RawStore ms,
+ final String db_name, final String tbl_name, final String part_name,
+ final boolean deleteData) throws NoSuchObjectException,
+ MetaException, TException, IOException {
+
+ List<String> partVals = null;
+ try {
+ partVals = getPartValsFromName(ms, db_name, tbl_name, part_name);
+ } catch (InvalidObjectException e) {
+ throw new NoSuchObjectException(e.getMessage());
+ }
+
+ return drop_partition_common(ms, db_name, tbl_name, partVals, deleteData);
+ }
+
+ @Override
+ public boolean drop_partition_by_name(final String db_name, final String tbl_name,
+ final String part_name, final boolean deleteData) throws NoSuchObjectException,
+ MetaException, TException {
+ startFunction("drop_partition_by_name", ": db=" + db_name + " tbl="
+ + tbl_name + " part=" + part_name);
+
+ boolean ret = false;
+ try {
+ ret = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ return drop_partition_by_name_core(ms, db_name, tbl_name,
+ part_name, deleteData);
+ }
+ });
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (MetaException e) {
+ throw e;
+ } catch (TException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("drop_partition_by_name", ret);
+ }
+
+ return ret;
+ }
+
+ @Override
+ public List<Partition> get_partitions_ps(final String db_name,
+ final String tbl_name, final List<String> part_vals,
+ final short max_parts) throws MetaException, TException, NoSuchObjectException {
+ startPartitionFunction("get_partitions_ps", db_name, tbl_name, part_vals);
+
+ List<Partition> ret = null;
+ try {
+ ret = get_partitions_ps_with_auth(db_name, tbl_name, part_vals,
+ max_parts, null, null);
+ } finally {
+ endFunction("get_partitions_ps", ret != null);
+ }
+
+ return ret;
+ }
+
+ @Override
+ public List<Partition> get_partitions_ps_with_auth(final String db_name,
+ final String tbl_name, final List<String> part_vals,
+ final short max_parts, final String userName,
+ final List<String> groupNames) throws MetaException, TException, NoSuchObjectException {
+ startPartitionFunction("get_partitions_ps_with_auth", db_name, tbl_name,
+ part_vals);
+ List<Partition> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<Partition>>() {
+ @Override
+ public List<Partition> run(RawStore ms) throws Exception {
+ return ms.listPartitionsPsWithAuth(db_name, tbl_name, part_vals, max_parts,
+ userName, groupNames);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (InvalidObjectException e) {
+ throw new MetaException(e.getMessage());
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_partitions_ps_with_auth", ret != null);
+ }
+ return ret;
+ }
+
+ @Override
+ public List<String> get_partition_names_ps(final String db_name,
+ final String tbl_name, final List<String> part_vals, final short max_parts)
+ throws MetaException, TException, NoSuchObjectException {
+ startPartitionFunction("get_partitions_names_ps", db_name, tbl_name, part_vals);
+ List<String> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<String>>() {
+ @Override
+ public List<String> run(RawStore ms) throws Exception {
+ return ms.listPartitionNamesPs(db_name, tbl_name, part_vals, max_parts);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_partitions_names_ps", ret != null);
+ }
+ return ret;
+ }
+
+ @Override
+ public List<String> partition_name_to_vals(String part_name)
+ throws MetaException, TException {
+ if (part_name.length() == 0) {
+ return new ArrayList<String>();
+ }
+ LinkedHashMap<String, String> map = Warehouse.makeSpecFromName(part_name);
+ List<String> part_vals = new ArrayList<String>();
+ part_vals.addAll(map.values());
+ return part_vals;
+ }
+
+ @Override
+ public Map<String, String> partition_name_to_spec(String part_name) throws MetaException,
+ TException {
+ if (part_name.length() == 0) {
+ return new HashMap<String, String>();
+ }
+ return Warehouse.makeSpecFromName(part_name);
+ }
+
+ @Override
+ public Index add_index(final Index newIndex, final Table indexTable)
+ throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
+ startFunction("add_index", ": db=" + newIndex.getDbName() + " tbl="
+ + newIndex.getOrigTableName() + " index=" + newIndex.getIndexName());
+ Index ret = null;
+ try {
+ ret = executeWithRetry(new Command<Index>() {
+ @Override
+ public Index run(RawStore ms) throws Exception {
+ return add_index_core(ms, newIndex, indexTable);
+ }
+ });
+ } catch (InvalidObjectException e) {
+ throw e;
+ } catch (AlreadyExistsException e) {
+ throw e;
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("add_index", ret != null);
+ }
+ return ret;
+ }
+
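+ // Creates the index (and, when supplied, its backing index table) in one
+ // metastore transaction; a failed commit rolls back and drops any index
+ // table created along the way.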
+ private Index add_index_core(final RawStore ms, final Index index, final Table indexTable)
+ throws InvalidObjectException, AlreadyExistsException, MetaException {
+
+ boolean success = false, indexTableCreated = false;
+
+ try {
+ ms.openTransaction();
+ Index old_index = null;
+ try {
+ old_index = get_index_by_name(index.getDbName(), index
+ .getOrigTableName(), index.getIndexName());
+ } catch (Exception e) {
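+ // Ignored: a failed lookup simply means no index with this name exists yet.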
+ }
+ if (old_index != null) {
+ throw new AlreadyExistsException("Index already exists:" + index);
+ }
+ Table origTbl = ms.getTable(index.getDbName(), index.getOrigTableName());
+ if (origTbl == null) {
+ throw new InvalidObjectException(
+ "Unable to add index because database or the orginal table do not exist");
+ }
+
+ // set create time
+ long time = System.currentTimeMillis() / 1000;
+ Table indexTbl = indexTable;
+ if (indexTbl != null) {
+ try {
+ indexTbl = ms.getTable(index.getDbName(), index.getIndexTableName());
+ } catch (Exception e) {
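+ // Ignored: the index table is expected not to exist yet.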
+ }
+ if (indexTbl != null) {
+ throw new InvalidObjectException(
+ "Unable to add index because index table already exists");
+ }
+ this.create_table(indexTable);
+ indexTableCreated = true;
+ }
+
+ index.setCreateTime((int) time);
+ index.putToParameters(Constants.DDL_TIME, Long.toString(time));
+
+ ms.addIndex(index);
+ success = ms.commitTransaction();
+ return index;
+ } finally {
+ if (!success) {
+ if (indexTableCreated) {
+ try {
+ this.drop_table(index.getDbName(), index.getIndexTableName(), false);
+ } catch (Exception e) {
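+ // Best-effort cleanup of the half-created index table; failure here is non-fatal.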
+ }
+ }
+ ms.rollbackTransaction();
+ }
+ }
+ }
+
+ @Override
+ public boolean drop_index_by_name(final String dbName, final String tblName,
+ final String indexName, final boolean deleteData) throws NoSuchObjectException,
+ MetaException, TException {
+ startFunction("drop_index_by_name", ": db=" + dbName + " tbl="
+ + tblName + " index=" + indexName);
+
+ boolean ret = false;
+ try {
+ ret = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ return drop_index_by_name_core(ms, dbName, tblName,
+ indexName, deleteData);
+ }
+ });
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (MetaException e) {
+ throw e;
+ } catch (TException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("drop_index_by_name", ret);
+ }
+
+ return ret;
+ }
+
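+ // Drops the index metadata and its underlying index table in a single
+ // transaction; the table's data directory is removed only after a
+ // successful commit.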
+ private boolean drop_index_by_name_core(final RawStore ms,
+ final String dbName, final String tblName,
+ final String indexName, final boolean deleteData) throws NoSuchObjectException,
+ MetaException, TException, IOException {
+
+ boolean success = false;
+ Path tblPath = null;
+ try {
+ ms.openTransaction();
+
+ //drop the underlying index table
+ Index index = get_index_by_name(dbName, tblName, indexName);
+ if (index == null) {
+ throw new NoSuchObjectException(indexName + " doesn't exist");
+ }
+ ms.dropIndex(dbName, tblName, indexName);
+
+ String idxTblName = index.getIndexTableName();
+ if (idxTblName != null) {
+ Table tbl = this.get_table(dbName, idxTblName);
+ if (tbl.getSd() == null) {
+ throw new MetaException("Table metadata is corrupted");
+ }
+
+ if (tbl.getSd().getLocation() != null) {
+ tblPath = new Path(tbl.getSd().getLocation());
+ if (!wh.isWritable(tblPath.getParent())) {
+ throw new MetaException("Index table metadata not deleted since " +
+ tblPath.getParent() + " is not writable by " +
+ hiveConf.getUser());
+ }
+ }
+ if (!ms.dropTable(dbName, idxTblName)) {
+ throw new MetaException("Unable to drop underlying data table "
+ + idxTblName + " for index " + indexName);
+ }
+ }
+ success = ms.commitTransaction();
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ return false;
+ } else if (deleteData && tblPath != null) {
+ wh.deleteDir(tblPath, true);
+ // ok even if the data is not deleted
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public Index get_index_by_name(final String dbName, final String tblName,
+ final String indexName) throws MetaException, NoSuchObjectException,
+ TException {
+
+ startFunction("get_index_by_name", ": db=" + dbName + " tbl="
+ + tblName + " index=" + indexName);
+
+ Index ret = null;
+
+ try {
+ ret = executeWithRetry(new Command<Index>() {
+ @Override
+ public Index run(RawStore ms) throws Exception {
+ return get_index_by_name_core(ms, dbName, tblName, indexName);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (TException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("drop_index_by_name", ret != null);
+ }
+ return ret;
+ }
+
+ private Index get_index_by_name_core(final RawStore ms, final String db_name,
+ final String tbl_name, final String index_name)
+ throws MetaException, NoSuchObjectException, TException {
+ Index index = ms.getIndex(db_name, tbl_name, index_name);
+
+ if (index == null) {
+ throw new NoSuchObjectException(db_name + "." + tbl_name
+ + " index=" + index_name + " not found");
+ }
+ return index;
+ }
+
+ @Override
+ public List<String> get_index_names(final String dbName, final String tblName,
+ final short maxIndexes) throws MetaException, TException {
+ startTableFunction("get_index_names", dbName, tblName);
+
+ List<String> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<String>>() {
+ @Override
+ public List<String> run(RawStore ms) throws Exception {
+ return ms.listIndexNames(dbName, tblName, maxIndexes);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_index_names", ret != null);
+ }
+ return ret;
+ }
+
+ @Override
+ public List<Index> get_indexes(final String dbName, final String tblName,
+ final short maxIndexes) throws NoSuchObjectException, MetaException,
+ TException {
+ startTableFunction("get_indexes", dbName, tblName);
+
+ List<Index> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<Index>>() {
+ @Override
+ public List<Index> run(RawStore ms) throws Exception {
+ return ms.getIndexes(dbName, tblName, maxIndexes);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_indexes", ret != null);
+ }
+ return ret;
+ }
+
+ @Override
+ public List<Partition> get_partitions_by_filter(final String dbName,
+ final String tblName, final String filter, final short maxParts)
+ throws MetaException, NoSuchObjectException, TException {
+ startTableFunction("get_partitions_by_filter", dbName, tblName);
+
+ List<Partition> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<Partition>>() {
+ @Override
+ public List<Partition> run(RawStore ms) throws Exception {
+ return ms.getPartitionsByFilter(dbName, tblName, filter, maxParts);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_partitions_by_filter", ret != null);
+ }
+ return ret;
+ }
+
+ @Override
+ public List<Partition> get_partitions_by_names(final String dbName,
+ final String tblName, final List<String> partNames)
+ throws MetaException, NoSuchObjectException, TException {
+
+ startTableFunction("get_partitions_by_names", dbName, tblName);
+
+ List<Partition> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<Partition>>() {
+ @Override
+ public List<Partition> run(RawStore ms) throws Exception {
+ return ms.getPartitionsByNames(dbName, tblName, partNames);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (NoSuchObjectException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ } finally {
+ endFunction("get_partitions_by_names", ret != null);
+ }
+ return ret;
+ }
+
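+ // Dispatches to the privilege lookup matching the object type; returns
+ // null for object types that carry no privilege set.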
+ @Override
+ public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject,
+ String userName, List<String> groupNames) throws MetaException,
+ TException {
+ if (hiveObject.getObjectType() == HiveObjectType.COLUMN) {
+ String partName = getPartName(hiveObject);
+ return this.get_column_privilege_set(hiveObject.getDbName(), hiveObject
+ .getObjectName(), partName, hiveObject.getColumnName(), userName,
+ groupNames);
+ } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) {
+ String partName = getPartName(hiveObject);
+ return this.get_partition_privilege_set(hiveObject.getDbName(),
+ hiveObject.getObjectName(), partName, userName, groupNames);
+ } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) {
+ return this.get_db_privilege_set(hiveObject.getDbName(), userName,
+ groupNames);
+ } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) {
+ return this.get_table_privilege_set(hiveObject.getDbName(), hiveObject
+ .getObjectName(), userName, groupNames);
+ } else if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) {
+ return this.get_user_privilege_set(userName, groupNames);
+ }
+ return null;
+ }
+
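+ // Builds the escaped partition name from the object's partition values,
+ // ordered by the table's partition keys.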
+ private String getPartName(HiveObjectRef hiveObject) throws MetaException {
+ String partName = null;
+ List<String> partValue = hiveObject.getPartValues();
+ if (partValue != null && partValue.size() > 0) {
+ try {
+ Table table = get_table(hiveObject.getDbName(), hiveObject
+ .getObjectName());
+ partName = Warehouse
+ .makePartName(table.getPartitionKeys(), partValue);
+ } catch (NoSuchObjectException e) {
+ throw new MetaException(e.getMessage());
+ }
+ }
+ return partName;
+ }
+
+ public PrincipalPrivilegeSet get_column_privilege_set(final String dbName,
+ final String tableName, final String partName, final String columnName,
+ final String userName, final List<String> groupNames) throws MetaException,
+ TException {
+ incrementCounter("get_column_privilege_set");
+
+ PrincipalPrivilegeSet ret = null;
+ try {
+ ret = executeWithRetry(new Command<PrincipalPrivilegeSet>() {
+ @Override
+ public PrincipalPrivilegeSet run(RawStore ms) throws Exception {
+ return ms.getColumnPrivilegeSet(
+ dbName, tableName, partName, columnName, userName, groupNames);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ public PrincipalPrivilegeSet get_db_privilege_set(final String dbName,
+ final String userName, final List<String> groupNames) throws MetaException,
+ TException {
+ incrementCounter("get_db_privilege_set");
+
+ PrincipalPrivilegeSet ret = null;
+ try {
+ ret = executeWithRetry(new Command<PrincipalPrivilegeSet>() {
+ @Override
+ public PrincipalPrivilegeSet run(RawStore ms) throws Exception {
+ return ms.getDBPrivilegeSet(dbName, userName, groupNames);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ public PrincipalPrivilegeSet get_partition_privilege_set(
+ final String dbName, final String tableName, final String partName,
+ final String userName, final List<String> groupNames)
+ throws MetaException, TException {
+ incrementCounter("get_partition_privilege_set");
+
+ PrincipalPrivilegeSet ret = null;
+ try {
+ ret = executeWithRetry(new Command<PrincipalPrivilegeSet>() {
+ @Override
+ public PrincipalPrivilegeSet run(RawStore ms) throws Exception {
+ return ms.getPartitionPrivilegeSet(dbName, tableName, partName,
+ userName, groupNames);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ public PrincipalPrivilegeSet get_table_privilege_set(final String dbName,
+ final String tableName, final String userName,
+ final List<String> groupNames) throws MetaException, TException {
+ incrementCounter("get_table_privilege_set");
+
+ PrincipalPrivilegeSet ret = null;
+ try {
+ ret = executeWithRetry(new Command<PrincipalPrivilegeSet>() {
+ @Override
+ public PrincipalPrivilegeSet run(RawStore ms) throws Exception {
+ return ms.getTablePrivilegeSet(dbName, tableName, userName,
+ groupNames);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ @Override
+ public boolean grant_role(final String roleName,
+ final String userName, final PrincipalType principalType,
+ final String grantor, final PrincipalType grantorType, final boolean grantOption)
+ throws MetaException, TException {
+ incrementCounter("add_role_member");
+
+ Boolean ret = null;
+ try {
+ ret = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ Role role = ms.getRole(roleName);
+ return ms.grantRole(role, userName, principalType, grantor, grantorType, grantOption);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ public List<Role> list_roles(final String principalName,
+ final PrincipalType principalType) throws MetaException, TException {
+ incrementCounter("list_roles");
+
+ List<Role> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<Role>>() {
+ @Override
+ public List<Role> run(RawStore ms) throws Exception {
+ List<Role> result = new ArrayList<Role>();
+ List<MRoleMap> roleMap = ms.listRoles(principalName, principalType);
+ if (roleMap != null) {
+ for (MRoleMap role : roleMap) {
+ MRole r = role.getRole();
+ result.add(new Role(r.getRoleName(), r
+ .getCreateTime(), r.getOwnerName()));
+ }
+ }
+ return result;
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ @Override
+ public boolean create_role(final Role role)
+ throws MetaException, TException {
+ incrementCounter("create_role");
+
+ Boolean ret = null;
+ try {
+ ret = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ return ms.addRole(role.getRoleName(), role.getOwnerName());
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ @Override
+ public boolean drop_role(final String roleName)
+ throws MetaException, TException {
+ incrementCounter("drop_role");
+
+ Boolean ret = null;
+ try {
+ ret = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ return ms.removeRole(roleName);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ @Override
+ public List<String> get_role_names() throws MetaException, TException {
+ incrementCounter("get_role_names");
+
+ List<String> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<String>>() {
+ @Override
+ public List<String> run(RawStore ms) throws Exception {
+ return ms.listRoleNames();
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ assert(e instanceof RuntimeException);
+ throw (RuntimeException)e;
+ }
+ return ret;
+ }
+
+ @Override
+ public boolean grant_privileges(final PrivilegeBag privileges) throws MetaException,
+ TException {
+ incrementCounter("grant_privileges");
+
+ Boolean ret = null;
+ try {
+ ret = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ return ms.grantPrivileges(privileges);
+ }
+ });
+ } catch (MetaException e) {
+ e.printStackTrace();
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ @Override
+ public boolean revoke_role(final String roleName, final String userName,
+ final PrincipalType principalType) throws MetaException, TException {
+ incrementCounter("remove_role_member");
+
+ Boolean ret = null;
+ try {
+ ret = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ Role mRole = ms.getRole(roleName);
+ return ms.revokeRole(mRole, userName, principalType);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ @Override
+ public boolean revoke_privileges(final PrivilegeBag privileges)
+ throws MetaException, TException {
+ incrementCounter("revoke_privileges");
+
+ Boolean ret = null;
+ try {
+ ret = executeWithRetry(new Command<Boolean>() {
+ @Override
+ public Boolean run(RawStore ms) throws Exception {
+ return ms.revokePrivileges(privileges);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ public PrincipalPrivilegeSet get_user_privilege_set(final String userName,
+ final List<String> groupNames) throws MetaException, TException {
+ incrementCounter("get_user_privilege_set");
+
+ PrincipalPrivilegeSet ret = null;
+ try {
+ ret = executeWithRetry(new Command<PrincipalPrivilegeSet>() {
+ @Override
+ public PrincipalPrivilegeSet run(RawStore ms) throws Exception {
+ return ms.getUserPrivilegeSet(userName, groupNames);
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ public PrincipalType getPrincipalType(String principalType) {
+ return PrincipalType.valueOf(principalType);
+ }
+
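+ // Routes to the object-type-specific privilege listing; returns null when
+ // the object type is unrecognized.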
+ @Override
+ public List<HiveObjectPrivilege> list_privileges(String principalName,
+ PrincipalType principalType, HiveObjectRef hiveObject)
+ throws MetaException, TException {
+ if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) {
+ return this.list_global_privileges(principalName, principalType);
+ } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) {
+ return this.list_db_privileges(principalName, principalType, hiveObject
+ .getDbName());
+ } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) {
+ return this.list_table_privileges(principalName, principalType,
+ hiveObject.getDbName(), hiveObject.getObjectName());
+ } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) {
+ return this.list_partition_privileges(principalName, principalType,
+ hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject
+ .getPartValues());
+ } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) {
+ return this.list_column_privileges(principalName, principalType,
+ hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject
+ .getPartValues(), hiveObject.getColumnName());
+ }
+ return null;
+ }
+
+ public List<HiveObjectPrivilege> list_column_privileges(
+ final String principalName, final PrincipalType principalType,
+ final String dbName, final String tableName, final List<String> partValues,
+ final String columnName) throws MetaException, TException {
+ incrementCounter("list_security_column_grant");
+
+ List<HiveObjectPrivilege> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<HiveObjectPrivilege>>() {
+ @Override
+ public List<HiveObjectPrivilege> run(RawStore ms) throws Exception {
+ String partName = null;
+ if (partValues != null && partValues.size() > 0) {
+ Table tbl = get_table(dbName, tableName);
+ partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues);
+ }
+
+ List<HiveObjectPrivilege> result = Collections.emptyList();
+
+ if (partName != null) {
+ Partition part = get_partition_by_name(dbName, tableName, partName);
+ List<MPartitionColumnPrivilege> mPartitionCols
+ = ms.listPrincipalPartitionColumnGrants(principalName,
+ principalType, dbName, tableName, partName, columnName);
+ if (mPartitionCols.size() > 0) {
+ result = new ArrayList<HiveObjectPrivilege>();
+ for (int i = 0; i < mPartitionCols.size(); i++) {
+ MPartitionColumnPrivilege sCol = mPartitionCols.get(i);
+ HiveObjectRef objectRef = new HiveObjectRef(
+ HiveObjectType.COLUMN, dbName, tableName,
+ part == null ? null : part.getValues(), sCol
+ .getColumnName());
+ HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef,
+ sCol.getPrincipalName(), principalType,
+ new PrivilegeGrantInfo(sCol.getPrivilege(), sCol
+ .getCreateTime(), sCol.getGrantor(), PrincipalType
+ .valueOf(sCol.getGrantorType()), sCol.getGrantOption()));
+ result.add(secObj);
+ }
+ }
+ } else {
+ List<MTableColumnPrivilege> mTableCols = ms
+ .listPrincipalTableColumnGrants(principalName, principalType,
+ dbName, tableName, columnName);
+ if (mTableCols.size() > 0) {
+ result = new ArrayList<HiveObjectPrivilege>();
+ for (int i = 0; i < mTableCols.size(); i++) {
+ MTableColumnPrivilege sCol = mTableCols.get(i);
+ HiveObjectRef objectRef = new HiveObjectRef(
+ HiveObjectType.COLUMN, dbName, tableName, null, sCol
+ .getColumnName());
+ HiveObjectPrivilege secObj = new HiveObjectPrivilege(
+ objectRef, sCol.getPrincipalName(), principalType,
+ new PrivilegeGrantInfo(sCol.getPrivilege(), sCol
+ .getCreateTime(), sCol.getGrantor(), PrincipalType
+ .valueOf(sCol.getGrantorType()), sCol
+ .getGrantOption()));
+ result.add(secObj);
+ }
+ }
+ }
+
+ return result;
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ public List<HiveObjectPrivilege> list_db_privileges(final String principalName,
+ final PrincipalType principalType, final String dbName)
+ throws MetaException, TException {
+ incrementCounter("list_security_db_grant");
+
+ List<HiveObjectPrivilege> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<HiveObjectPrivilege>>() {
+ @Override
+ public List<HiveObjectPrivilege> run(RawStore ms) throws Exception {
+ List<MDBPrivilege> mDbs = ms.listPrincipalDBGrants(
+ principalName, principalType, dbName);
+ if (mDbs.size() > 0) {
+ List<HiveObjectPrivilege> result = new ArrayList<HiveObjectPrivilege>();
+ for (int i = 0; i < mDbs.size(); i++) {
+ MDBPrivilege sDB = mDbs.get(i);
+ HiveObjectRef objectRef = new HiveObjectRef(
+ HiveObjectType.DATABASE, dbName, null, null, null);
+ HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef,
+ sDB.getPrincipalName(), principalType,
+ new PrivilegeGrantInfo(sDB.getPrivilege(), sDB
+ .getCreateTime(), sDB.getGrantor(), PrincipalType
+ .valueOf(sDB.getGrantorType()), sDB.getGrantOption()));
+ result.add(secObj);
+ }
+ return result;
+ }
+ return Collections.emptyList();
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ public List<HiveObjectPrivilege> list_partition_privileges(
+ final String principalName, final PrincipalType principalType,
+ final String dbName, final String tableName, final List<String> partValues)
+ throws MetaException, TException {
+ incrementCounter("list_security_partition_grant");
+
+ List<HiveObjectPrivilege> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<HiveObjectPrivilege>>() {
+ @Override
+ public List<HiveObjectPrivilege> run(RawStore ms) throws Exception {
+ Table tbl = get_table(dbName, tableName);
+ String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues);
+ List<MPartitionPrivilege> mParts = ms.listPrincipalPartitionGrants(
+ principalName, principalType, dbName, tableName, partName);
+ if (mParts.size() > 0) {
+ List<HiveObjectPrivilege> result = new ArrayList<HiveObjectPrivilege>();
+ for (int i = 0; i < mParts.size(); i++) {
+ MPartitionPrivilege sPart = mParts.get(i);
+ HiveObjectRef objectRef = new HiveObjectRef(
+ HiveObjectType.PARTITION, dbName, tableName, partValues,
+ null);
+ HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef,
+ sPart.getPrincipalName(), principalType,
+ new PrivilegeGrantInfo(sPart.getPrivilege(), sPart
+ .getCreateTime(), sPart.getGrantor(), PrincipalType
+ .valueOf(sPart.getGrantorType()), sPart
+ .getGrantOption()));
+
+ result.add(secObj);
+ }
+ return result;
+ }
+ return Collections.emptyList();
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ public List<HiveObjectPrivilege> list_table_privileges(
+ final String principalName, final PrincipalType principalType,
+ final String dbName, final String tableName) throws MetaException,
+ TException {
+ incrementCounter("list_security_table_grant");
+
+ List<HiveObjectPrivilege> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<HiveObjectPrivilege>>() {
+ @Override
+ public List<HiveObjectPrivilege> run(RawStore ms) throws Exception {
+ List<MTablePrivilege> mTbls = ms
+ .listAllTableGrants(principalName, principalType, dbName, tableName);
+ if (mTbls.size() > 0) {
+ List<HiveObjectPrivilege> result = new ArrayList<HiveObjectPrivilege>();
+ for (int i = 0; i < mTbls.size(); i++) {
+ MTablePrivilege sTbl = mTbls.get(i);
+ HiveObjectRef objectRef = new HiveObjectRef(
+ HiveObjectType.TABLE, dbName, tableName, null, null);
+ HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef,
+ sTbl.getPrincipalName(), principalType,
+ new PrivilegeGrantInfo(sTbl.getPrivilege(), sTbl.getCreateTime(), sTbl
+ .getGrantor(), PrincipalType.valueOf(sTbl
+ .getGrantorType()), sTbl.getGrantOption()));
+ result.add(secObj);
+ }
+ return result;
+ }
+ return Collections.emptyList();
+ }
+ });
+ } catch (MetaException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return ret;
+ }
+
+ public List<HiveObjectPrivilege> list_global_privileges(
+ final String principalName, final PrincipalType principalType)
+ throws MetaException, TException {
+ incrementCounter("list_security_user_grant");
+
+ List<HiveObjectPrivilege> ret = null;
+ try {
+ ret = executeWithRetry(new Command<List<HiveObjectPrivilege>>() {
+ @Override
+ public List<HiveObjectPrivilege> run(RawStore ms) throws Exception {
+ List<MGlobalPrivilege> mUsers = ms.listPrincipalGlobalGrants(
+ principalName, principalType);
+ if (mUsers.size() > 0) {
+ List