diff --git beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java index 84963af10e..7c65fe1e9b 100644 --- beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java +++ beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java @@ -806,39 +806,9 @@ boolean validateSchemaTables(Connection conn) throws HiveMetaException { Matcher matcher = null; Pattern regexp = null; List subs = new ArrayList(); - int groupNo = 0; + int groupNo = 2; - switch (dbType) { - case HiveSchemaHelper.DB_ORACLE: - regexp = Pattern.compile("(CREATE TABLE(IF NOT EXISTS)*) (\\S+).*"); - groupNo = 3; - break; - - case HiveSchemaHelper.DB_MYSQL: - regexp = Pattern.compile("(CREATE TABLE) (\\S+).*"); - groupNo = 2; - break; - - case HiveSchemaHelper.DB_MSSQL: - regexp = Pattern.compile("(CREATE TABLE) (\\S+).*"); - groupNo = 2; - break; - - case HiveSchemaHelper.DB_DERBY: - regexp = Pattern.compile("(CREATE TABLE(IF NOT EXISTS)*) (\\S+).*"); - groupNo = 3; - break; - - case HiveSchemaHelper.DB_POSTGRACE: - regexp = Pattern.compile("(CREATE TABLE(IF NOT EXISTS)*) (\\S+).*"); - groupNo = 3; - break; - - default: - regexp = Pattern.compile("(CREATE TABLE(IF NOT EXISTS)*) (\\S+).*"); - groupNo = 3; - break; - } + regexp = Pattern.compile("CREATE TABLE(\\s+IF NOT EXISTS)?\\s+(\\S+).*"); if (!(new File(path)).exists()) { throw new Exception(path + " does not exist. 
Potentially incorrect version in the metastore VERSION table"); @@ -867,7 +837,7 @@ boolean validateSchemaTables(Connection conn) throws HiveMetaException { if (matcher.find()) { String table = matcher.group(groupNo); if (dbType.equals("derby")) - table = table.replaceAll("APP.",""); + table = table.replaceAll("APP\\.",""); tableList.add(table.toLowerCase()); LOG.debug("Found table " + table + " in the schema"); } diff --git metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql new file mode 100644 index 0000000000..16190dc0a1 --- /dev/null +++ metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql @@ -0,0 +1,25 @@ +CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL); +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" ("NAME"); +ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_PK" PRIMARY KEY ("RP_ID"); + +CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, PARENT_POOL_ID BIGINT, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER); +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_POOL" ON "APP"."WM_POOL" ("RP_ID", "PATH"); +ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID"); +ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; +ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024)); +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_TRIGGER" ON "APP"."WM_TRIGGER" ("RP_ID", "NAME"); +ALTER TABLE 
"APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_PK" PRIMARY KEY ("TRIGGER_ID"); +ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +CREATE TABLE "APP"."WM_POOL_TO_TRIGGER" (POOL_ID BIGINT NOT NULL, TRIGGER_ID BIGINT NOT NULL); +ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_PK" PRIMARY KEY ("POOL_ID", "TRIGGER_ID"); +ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; +ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "APP"."WM_TRIGGER" ("TRIGGER_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, ENTITY_TYPE VARCHAR(10) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID BIGINT NOT NULL, ORDERING INTEGER); +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_MAPPING" ON "APP"."WM_MAPPING" ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME"); +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_PK" PRIMARY KEY ("MAPPING_ID"); +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; diff --git metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql index 7b8bef1c09..4c35380af4 100644 --- metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql +++ metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql @@ -110,6 +110,16 @@ CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" CREATE TABLE 
"APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY" VARCHAR(255) NOT NULL, "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, "DESCRIPTION" VARCHAR(1000)); +CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID INTEGER NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL); + +CREATE TABLE "APP"."WM_POOL" (POOL_ID INTEGER NOT NULL, RP_ID INTEGER NOT NULL, PATH VARCHAR(1024) NOT NULL, PARENT_POOL_ID INTEGER, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER); + +CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID INTEGER NOT NULL, RP_ID INTEGER NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024)); + +CREATE TABLE "APP"."WM_POOL_TO_TRIGGER" (POOL_ID INTEGER NOT NULL, TRIGGER_ID INTEGER NOT NULL); + +CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID INTEGER NOT NULL, RP_ID INTEGER NOT NULL, ENTITY_TYPE VARCHAR(10) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID INTEGER NOT NULL, ORDERING INTEGER); + -- ---------------------------------------------- -- DML Statements -- ---------------------------------------------- @@ -158,6 +168,14 @@ CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"( CREATE INDEX "APP"."CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "APP"."KEY_CONSTRAINTS"("CONSTRAINT_TYPE"); +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" ("NAME"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_POOL" ON "APP"."WM_POOL" ("RP_ID", "PATH"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_TRIGGER" ON "APP"."WM_TRIGGER" ("RP_ID", "NAME"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_MAPPING" ON "APP"."WM_MAPPING" ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME"); + -- ---------------------------------------------- -- DDL Statements for keys -- ---------------------------------------------- @@ -332,6 +350,28 @@ ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFER ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES 
"APP"."FUNCS" ("FUNC_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; +ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_PK" PRIMARY KEY ("RP_ID"); + +ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID"); + +ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_PK" PRIMARY KEY ("TRIGGER_ID"); + +ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "APP"."WM_TRIGGER" ("TRIGGER_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_PK" PRIMARY KEY ("MAPPING_ID"); + +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + -- ---------------------------------------------- -- DDL Statements for checks -- ---------------------------------------------- @@ -349,4 +389,3 @@ RUN 'hive-txn-schema-3.0.0.derby.sql'; -- Record schema version. 
Should be the last step in the init script -- ----------------------------------------------------------------- INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '3.0.0', 'Hive release version 3.0.0'); - diff --git metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql index 756c9c10e6..1f2647dfbf 100644 --- metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql +++ metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql @@ -4,5 +4,6 @@ RUN '042-HIVE-16575.derby.sql'; RUN '043-HIVE-16922.derby.sql'; RUN '044-HIVE-16997.derby.sql'; RUN '045-HIVE-16886.derby.sql'; +RUN '046-HIVE-17566.derby.sql'; UPDATE "APP".VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; diff --git metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql index 2db7e7de0a..ca7af06e7a 100644 --- metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql +++ metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql @@ -947,6 +947,23 @@ SELECT max(CASE `PARAM_KEY` WHEN 'transient_lastDdlTime' THEN `PARAM_VALUE` END) AS TRANSIENT_LAST_DDL_TIME FROM `PARTITION_PARAMS` GROUP BY `PART_ID`; +CREATE TABLE IF NOT EXISTS `WM_RESOURCEPLANS` ( + `NAME` string, + `STATUS` string, + `QUERY_PARALLELISM` int +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"NAME\", + \"STATUS\", + \"QUERY_PARALLELISM\" +FROM + \"WM_RESOURCEPLAN\"" +); + DROP DATABASE IF EXISTS INFORMATION_SCHEMA; CREATE DATABASE INFORMATION_SCHEMA; diff --git metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql new file mode 100644 index 0000000000..0ee0f758f1 --- /dev/null +++ metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql @@ -0,0 +1,76 @@ +CREATE 
TABLE WM_RESOURCEPLAN +( + RP_ID bigint NOT NULL, + "NAME" nvarchar(128) NOT NULL, + QUERY_PARALLELISM int, + STATUS nvarchar(20) NOT NULL +); + +ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME"); + + +CREATE TABLE WM_POOL +( + POOL_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + PATH nvarchar(1024) NOT NULL, + PARENT_POOL_ID bigint, + ALLOC_FRACTION DOUBLE, + QUERY_PARALLELISM int +); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, "NAME"); +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID); + + +CREATE TABLE WM_TRIGGER +( + TRIGGER_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + "NAME" nvarchar(128) NOT NULL, + TRIGGER_EXPRESSION nvarchar(1024), + ACTION_EXPRESSION nvarchar(1024) +); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME"); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + + +CREATE TABLE WM_POOL_TO_TRIGGER +( + POOL_ID bigint NOT NULL, + TRIGGER_ID bigint NOT NULL +); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID); + + +CREATE TABLE WM_MAPPING +( + MAPPING_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + ENTITY_TYPE nvarchar(10) NOT NULL, + ENTITY_NAME nvarchar(128) NOT NULL, + POOL_ID bigint NOT NULL, + ORDERING int +); + +ALTER TABLE 
WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); diff --git metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql index 3c0169dcea..86f3e5c4a9 100644 --- metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql +++ metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql @@ -595,7 +595,60 @@ CREATE TABLE NOTIFICATION_SEQUENCE ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID); -INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 WHERE NOT EXISTS (SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE); +-- Tables to manage resource plans. + +CREATE TABLE WM_RESOURCEPLAN +( + RP_ID bigint NOT NULL, + "NAME" nvarchar(128) NOT NULL, + QUERY_PARALLELISM int, + STATUS nvarchar(20) NOT NULL +); + +ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID); + +CREATE TABLE WM_POOL +( + POOL_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + PATH nvarchar(1024) NOT NULL, + PARENT_POOL_ID bigint, + ALLOC_FRACTION DOUBLE, + QUERY_PARALLELISM int +); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID); + +CREATE TABLE WM_TRIGGER +( + TRIGGER_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + "NAME" nvarchar(128) NOT NULL, + TRIGGER_EXPRESSION nvarchar(1024), + ACTION_EXPRESSION nvarchar(1024) +); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID); + +CREATE TABLE WM_POOL_TO_TRIGGER +( + POOL_ID bigint NOT NULL, + TRIGGER_ID bigint NOT NULL +); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID); + +CREATE TABLE 
WM_MAPPING +( + MAPPING_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + ENTITY_TYPE nvarchar(10) NOT NULL, + ENTITY_NAME nvarchar(128) NOT NULL, + POOL_ID bigint NOT NULL, + ORDERING int +); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID); -- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey] @@ -856,6 +909,29 @@ ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) RE CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID); +-- Constraints for resource plan tables. + +CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME"); + +CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME"); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); -- ----------------------------------------------------------------------------------------------------------------------------------------------- diff --git metastore/scripts/upgrade/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql metastore/scripts/upgrade/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql index cca8426e0d..864a5e5bd5 
100644 --- metastore/scripts/upgrade/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql +++ metastore/scripts/upgrade/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql @@ -5,6 +5,7 @@ SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS MESSAGE; :r 028-HIVE-16922.mssql.sql :r 029-HIVE-16997.mssql.sql :r 030-HIVE-16886.mssql.sql +:r 031-HIVE-17566.mssql.sql UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS MESSAGE; diff --git metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql new file mode 100644 index 0000000000..25dfbe95e5 --- /dev/null +++ metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql @@ -0,0 +1,57 @@ +CREATE TABLE WM_RESOURCEPLAN ( + `RP_ID` bigint(20) NOT NULL, + `NAME` varchar(128) NOT NULL, + `QUERY_PARALLELISM` int(11), + `STATUS` varchar(20) NOT NULL, + PRIMARY KEY (`RP_ID`), + KEY `UNIQUE_WM_RESOURCEPLAN` (`NAME`), +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE WM_POOL +( + `POOL_ID` bigint(20) NOT NULL, + `RP_ID` bigint(20) NOT NULL, + `PATH` varchar(1024) NOT NULL, + `PARENT_POOL_ID` bigint(20), + `ALLOC_FRACTION` DOUBLE, + `QUERY_PARALLELISM` int(11), + PRIMARY KEY (`POOL_ID`), + KEY `UNIQUE_WM_POOL` (`RP_ID`, `PATH`), + CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`), + CONSTRAINT `WM_POOL_FK2` FOREIGN KEY (`PARENT_POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE WM_TRIGGER +( + `TRIGGER_ID` bigint(20) NOT NULL, + `RP_ID` bigint(20) NOT NULL, + `NAME` varchar(128) NOT NULL, + `TRIGGER_EXPRESSION` varchar(1024), + `ACTION_EXPRESSION` varchar(1024), + PRIMARY KEY (`TRIGGER_ID`), + KEY `UNIQUE_WM_TRIGGER` (`RP_ID`, `NAME`), + CONSTRAINT `WM_TRIGGER_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE 
WM_POOL_TO_TRIGGER +( + `POOL_ID` bigint(20) NOT NULL, + `TRIGGER_ID` bigint(20) NOT NULL, + PRIMARY KEY (`POOL_ID`, `TRIGGER_ID`), + CONSTRAINT `WM_POOL_TO_TRIGGER_FK1` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`), + CONSTRAINT `WM_POOL_TO_TRIGGER_FK2` FOREIGN KEY (`TRIGGER_ID`) REFERENCES `WM_TRIGGER` (`TRIGGER_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE WM_MAPPING +( + `MAPPING_ID` bigint(20) NOT NULL, + `RP_ID` bigint(20) NOT NULL, + `ENTITY_TYPE` varchar(10) NOT NULL, + `ENTITY_NAME` varchar(128) NOT NULL, + `POOL_ID` bigint(20) NOT NULL, + `ORDERING int, + PRIMARY KEY (`MAPPING_ID`), + KEY `UNIQUE_WM_MAPPING` (`RP_ID`, `ENTITY_TYPE`, `ENTITY_NAME`), + CONSTRAINT `WM_MAPPING_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`), + CONSTRAINT `WM_MAPPING_FK2` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; diff --git metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql index 6091801e97..f72c95097c 100644 --- metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql +++ metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql @@ -845,6 +845,68 @@ CREATE TABLE IF NOT EXISTS `METASTORE_DB_PROPERTIES` ( PRIMARY KEY(`PROPERTY_KEY`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- --------------------- +-- Resource plan tables. 
+-- --------------------- +CREATE TABLE WM_RESOURCEPLAN ( + `RP_ID` bigint(20) NOT NULL, + `NAME` varchar(128) NOT NULL, + `QUERY_PARALLELISM` int(11), + `STATUS` varchar(20) NOT NULL, + PRIMARY KEY (`RP_ID`), + KEY `UNIQUE_WM_RESOURCEPLAN` (`NAME`), +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE WM_POOL +( + `POOL_ID` bigint(20) NOT NULL, + `RP_ID` bigint(20) NOT NULL, + `PATH` varchar(1024) NOT NULL, + `PARENT_POOL_ID` bigint(20), + `ALLOC_FRACTION` DOUBLE, + `QUERY_PARALLELISM` int(11), + PRIMARY KEY (`POOL_ID`), + KEY `UNIQUE_WM_POOL` (`RP_ID`, `PATH`), + CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`), + CONSTRAINT `WM_POOL_FK2` FOREIGN KEY (`PARENT_POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE WM_TRIGGER +( + `TRIGGER_ID` bigint(20) NOT NULL, + `RP_ID` bigint(20) NOT NULL, + `NAME` varchar(128) NOT NULL, + `TRIGGER_EXPRESSION` varchar(1024), + `ACTION_EXPRESSION` varchar(1024), + PRIMARY KEY (`TRIGGER_ID`), + KEY `UNIQUE_WM_TRIGGER` (`RP_ID`, `NAME`), + CONSTRAINT `WM_TRIGGER_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE WM_POOL_TO_TRIGGER +( + `POOL_ID` bigint(20) NOT NULL, + `TRIGGER_ID` bigint(20) NOT NULL, + PRIMARY KEY (`POOL_ID`, `TRIGGER_ID`), + CONSTRAINT `WM_POOL_TO_TRIGGER_FK1` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`), + CONSTRAINT `WM_POOL_TO_TRIGGER_FK2` FOREIGN KEY (`TRIGGER_ID`) REFERENCES `WM_TRIGGER` (`TRIGGER_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE WM_MAPPING +( + `MAPPING_ID` bigint(20) NOT NULL, + `RP_ID` bigint(20) NOT NULL, + `ENTITY_TYPE` varchar(10) NOT NULL, + `ENTITY_NAME` varchar(128) NOT NULL, + `POOL_ID` bigint(20) NOT NULL, + `ORDERING int, + PRIMARY KEY (`MAPPING_ID`), + KEY `UNIQUE_WM_MAPPING` (`RP_ID`, `ENTITY_TYPE`, `ENTITY_NAME`), + CONSTRAINT `WM_MAPPING_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` 
(`RP_ID`), + CONSTRAINT `WM_MAPPING_FK2` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + -- ---------------------------- -- Transaction and Lock Tables -- ---------------------------- diff --git metastore/scripts/upgrade/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql metastore/scripts/upgrade/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql index 120a1f88b3..caa059d893 100644 --- metastore/scripts/upgrade/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql +++ metastore/scripts/upgrade/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql @@ -5,6 +5,7 @@ SOURCE 042-HIVE-16575.mysql.sql; SOURCE 043-HIVE-16922.mysql.sql; SOURCE 044-HIVE-16997.mysql.sql; SOURCE 045-HIVE-16886.mysql.sql; +SOURCE 046-HIVE-17566.mysql.sql; UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS ' '; diff --git metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql new file mode 100644 index 0000000000..0ee0f758f1 --- /dev/null +++ metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql @@ -0,0 +1,76 @@ +CREATE TABLE WM_RESOURCEPLAN +( + RP_ID bigint NOT NULL, + "NAME" nvarchar(128) NOT NULL, + QUERY_PARALLELISM int, + STATUS nvarchar(20) NOT NULL +); + +ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME"); + + +CREATE TABLE WM_POOL +( + POOL_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + PATH nvarchar(1024) NOT NULL, + PARENT_POOL_ID bigint, + ALLOC_FRACTION DOUBLE, + QUERY_PARALLELISM int +); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, "NAME"); +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY 
(PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID); + + +CREATE TABLE WM_TRIGGER +( + TRIGGER_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + "NAME" nvarchar(128) NOT NULL, + TRIGGER_EXPRESSION nvarchar(1024), + ACTION_EXPRESSION nvarchar(1024) +); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME"); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + + +CREATE TABLE WM_POOL_TO_TRIGGER +( + POOL_ID bigint NOT NULL, + TRIGGER_ID bigint NOT NULL +); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID); + + +CREATE TABLE WM_MAPPING +( + MAPPING_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + ENTITY_TYPE nvarchar(10) NOT NULL, + ENTITY_NAME nvarchar(128) NOT NULL, + POOL_ID bigint NOT NULL, + ORDERING int +); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); diff --git metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql index 79b9efb741..47bfc5ae08 100644 --- metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql +++ metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql @@ -576,8 +576,60 @@ CREATE TABLE NOTIFICATION_SEQUENCE ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT 
NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID); -INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 FROM DUAL WHERE NOT EXISTS ( SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE); +-- Tables to manage resource plans. +CREATE TABLE WM_RESOURCEPLAN +( + RP_ID bigint NOT NULL, + "NAME" nvarchar(128) NOT NULL, + QUERY_PARALLELISM int, + STATUS nvarchar(20) NOT NULL +); + +ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID); + +CREATE TABLE WM_POOL +( + POOL_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + PATH nvarchar(1024) NOT NULL, + PARENT_POOL_ID bigint, + ALLOC_FRACTION DOUBLE, + QUERY_PARALLELISM int +); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID); + +CREATE TABLE WM_TRIGGER +( + TRIGGER_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + "NAME" nvarchar(128) NOT NULL, + TRIGGER_EXPRESSION nvarchar(1024), + ACTION_EXPRESSION nvarchar(1024) +); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID); + +CREATE TABLE WM_POOL_TO_TRIGGER +( + POOL_ID bigint NOT NULL, + TRIGGER_ID bigint NOT NULL +); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID); + +CREATE TABLE WM_MAPPING +( + MAPPING_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + ENTITY_TYPE nvarchar(10) NOT NULL, + ENTITY_NAME nvarchar(128) NOT NULL, + POOL_ID bigint NOT NULL, + ORDERING int +); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID); -- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege] ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ; @@ -814,6 +866,30 @@ CREATE TABLE METASTORE_DB_PROPERTIES ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY); +-- Constraints for resource plan tables. 
+ +CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME"); + +CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME"); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); + ------------------------------ -- Transaction and lock tables ------------------------------ diff --git metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql index 9b8b162e23..33174c8a9a 100644 --- metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql +++ metastore/scripts/upgrade/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql @@ -5,6 +5,7 @@ SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual; @043-HIVE-16922.oracle.sql; @044-HIVE-16997.oracle.sql; @045-HIVE-16886.oracle.sql; +@046-HIVE-17566.oracle.sql; UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual; diff --git metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql 
metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql new file mode 100644 index 0000000000..aa27a6486b --- /dev/null +++ metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql @@ -0,0 +1,88 @@ +CREATE TABLE "WM_RESOURCEPLAN" ( + "RP_ID" bigint NOT NULL, + "NAME" character varying(128) NOT NULL, + "QUERY_PARALLELISM" integer, + "STATUS" character varying(20) NOT NULL +); + +ALTER TABLE ONLY "WM_RESOURCEPLAN" + ADD CONSTRAINT "WM_RESOURCEPLAN_pkey" PRIMARY KEY ("RP_ID"); + +ALTER TABLE ONLY "WM_RESOURCEPLAN" + ADD CONSTRAINT "UNIQUE_WM_RESOURCEPLAN" UNIQUE ("NAME"); + + +CREATE TABLE "WM_POOL" ( + "POOL_ID" bigint NOT NULL, + "RP_ID" bigint NOT NULL, + "PATH" character varying(1024) NOT NULL, + "PARENT_POOL_ID" bigint, + "ALLOC_FRACTION" DOUBLE, + "QUERY_PARALLELISM" integer +); + +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "WM_POOL_pkey" PRIMARY KEY ("POOL_ID"); + +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "UNIQUE_WM_POOL" UNIQUE ("RP_ID", "PATH"); + +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; + + +CREATE TABLE "WM_TRIGGER" ( + "TRIGGER_ID" bigint NOT NULL, + "RP_ID" bigint NOT NULL, + "NAME" character varying(128) NOT NULL, + "TRIGGER_EXPRESSION" character varying(1024) DEFAULT NULL::character varying, + "ACTION_EXPRESSION" character varying(1024) DEFAULT NULL::character varying +); + +ALTER TABLE ONLY "WM_TRIGGER" + ADD CONSTRAINT "WM_TRIGGER_pkey" PRIMARY KEY ("TRIGGER_ID"); + +ALTER TABLE ONLY "WM_TRIGGER" + ADD CONSTRAINT "UNIQUE_WM_TRIGGER" UNIQUE ("RP_ID", "NAME"); + +ALTER TABLE ONLY "WM_TRIGGER" + ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; + + +CREATE TABLE "WM_POOL_TO_TRIGGER" ( + "POOL_ID" bigint NOT NULL, + "TRIGGER_ID" bigint NOT NULL +); + +ALTER TABLE 
ONLY "WM_POOL_TO_TRIGGER" + ADD CONSTRAINT "WM_POOL_TO_TRIGGER_pkey" PRIMARY KEY ("POOL_ID", "TRIGGER_ID"); + +ALTER TABLE ONLY "WM_POOL_TO_TRIGGER" + ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_POOL_TO_TRIGGER" + ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "WM_TRIGGER" ("TRIGGER_ID") DEFERRABLE; + + +CREATE TABLE "WM_MAPPING" ( + "MAPPING_ID" bigint NOT NULL, + "RP_ID" bigint NOT NULL, + "ENTITY_TYPE" character varying(10) NOT NULL, + "ENTITY_NAME" character varying(128) NOT NULL, + "POOL_ID" bigint NOT NULL, + "ORDERING" integer +); + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "WM_MAPPING_pkey" PRIMARY KEY ("MAPPING_ID"); + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "UNIQUE_WM_MAPPING" UNIQUE ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME"); + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; diff --git metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql index 92a2d38c0e..5c770e2659 100644 --- metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql +++ metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql @@ -618,6 +618,45 @@ CREATE TABLE "METASTORE_DB_PROPERTIES" "DESCRIPTION" VARCHAR(1000) ); + +CREATE TABLE "WM_RESOURCEPLAN" ( + "RP_ID" bigint NOT NULL, + "NAME" character varying(128) NOT NULL, + "QUERY_PARALLELISM" integer, + "STATUS" character varying(20) NOT NULL +); + +CREATE TABLE "WM_POOL" ( + "POOL_ID" bigint NOT NULL, + "RP_ID" bigint NOT NULL, + "PATH" character varying(1024) NOT NULL, + "PARENT_POOL_ID" bigint, + "ALLOC_FRACTION" double precision, + "QUERY_PARALLELISM" integer +); + +CREATE TABLE "WM_TRIGGER" ( + 
"TRIGGER_ID" bigint NOT NULL, + "RP_ID" bigint NOT NULL, + "NAME" character varying(128) NOT NULL, + "TRIGGER_EXPRESSION" character varying(1024) DEFAULT NULL::character varying, + "ACTION_EXPRESSION" character varying(1024) DEFAULT NULL::character varying +); + +CREATE TABLE "WM_POOL_TO_TRIGGER" ( + "POOL_ID" bigint NOT NULL, + "TRIGGER_ID" bigint NOT NULL +); + +CREATE TABLE "WM_MAPPING" ( + "MAPPING_ID" bigint NOT NULL, + "RP_ID" bigint NOT NULL, + "ENTITY_TYPE" character varying(10) NOT NULL, + "ENTITY_NAME" character varying(128) NOT NULL, + "POOL_ID" bigint NOT NULL, + "ORDERING" integer +); + -- -- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: -- @@ -965,6 +1004,35 @@ ALTER TABLE ONLY "ROLE_MAP" ALTER TABLE ONLY "METASTORE_DB_PROPERTIES" ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY"); + +-- Resource plan: Primary key and unique key constraints. +ALTER TABLE ONLY "WM_RESOURCEPLAN" + ADD CONSTRAINT "WM_RESOURCEPLAN_pkey" PRIMARY KEY ("RP_ID"); + +ALTER TABLE ONLY "WM_RESOURCEPLAN" + ADD CONSTRAINT "UNIQUE_WM_RESOURCEPLAN" UNIQUE ("NAME"); + +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "WM_POOL_pkey" PRIMARY KEY ("POOL_ID"); + +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "UNIQUE_WM_POOL" UNIQUE ("RP_ID", "PATH"); + +ALTER TABLE ONLY "WM_TRIGGER" + ADD CONSTRAINT "WM_TRIGGER_pkey" PRIMARY KEY ("TRIGGER_ID"); + +ALTER TABLE ONLY "WM_TRIGGER" + ADD CONSTRAINT "UNIQUE_WM_TRIGGER" UNIQUE ("RP_ID", "NAME"); + +ALTER TABLE ONLY "WM_POOL_TO_TRIGGER" + ADD CONSTRAINT "WM_POOL_TO_TRIGGER_pkey" PRIMARY KEY ("POOL_ID", "TRIGGER_ID"); + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "WM_MAPPING_pkey" PRIMARY KEY ("MAPPING_ID"); + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "UNIQUE_WM_MAPPING" UNIQUE ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME"); + -- -- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: -- @@ -1473,6 +1541,29 @@ ALTER TABLE ONLY "FUNCS" ALTER TABLE ONLY "FUNC_RU" ADD 
CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE; +-- Resource plan FK constraints. + +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_TRIGGER" + ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_POOL_TO_TRIGGER" + ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_POOL_TO_TRIGGER" + ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "WM_TRIGGER" ("TRIGGER_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; + -- -- Name: public; Type: ACL; Schema: -; Owner: hiveuser -- diff --git metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql index c4d3d5f5d9..01d359e5f4 100644 --- metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql +++ metastore/scripts/upgrade/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql @@ -5,6 +5,7 @@ SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0'; \i 042-HIVE-16922.postgres.sql; \i 043-HIVE-16997.postgres.sql; \i 044-HIVE-16886.postgres.sql; +\i 045-HIVE-17566.postgres.sql; UPDATE "VERSION" SET "SCHEMA_VERSION"='3.0.0', "VERSION_COMMENT"='Hive release version 3.0.0' where "VER_ID"=1; SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0'; diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java 
metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 6ae6f32d02..9c51d8e1fa 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -72,6 +72,7 @@ import org.apache.hadoop.hive.metastore.model.MNotificationNextId; import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics; import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics; +import org.apache.hadoop.hive.metastore.model.MWMResourcePlan; import org.apache.hadoop.hive.metastore.parser.ExpressionTree; import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder; import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode; @@ -222,6 +223,7 @@ private boolean ensureDbInit() { initQueries.add(pm.newQuery(MConstraint.class, "childIntegerIndex < 0")); initQueries.add(pm.newQuery(MNotificationLog.class, "dbName == ''")); initQueries.add(pm.newQuery(MNotificationNextId.class, "nextEventId < -1")); + initQueries.add(pm.newQuery(MWMResourcePlan.class, "name == ''")); Query q; while ((q = initQueries.peekFirst()) != null) { q.execute(); diff --git ql/src/test/results/clientpositive/llap/sysdb.q.out ql/src/test/results/clientpositive/llap/sysdb.q.out index 425dd971f5..9813636531 100644 --- ql/src/test/results/clientpositive/llap/sysdb.q.out +++ ql/src/test/results/clientpositive/llap/sysdb.q.out @@ -2187,6 +2187,44 @@ POSTHOOK: Lineage: PARTITION_STATS_VIEW.part_id SIMPLE [(partition_params)partit POSTHOOK: Lineage: PARTITION_STATS_VIEW.raw_data_size EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] POSTHOOK: Lineage: PARTITION_STATS_VIEW.total_size EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from 
deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] POSTHOOK: Lineage: PARTITION_STATS_VIEW.transient_last_ddl_time EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +PREHOOK: query: CREATE TABLE IF NOT EXISTS `WM_RESOURCEPLANS` ( + `NAME` string, + `STATUS` string, + `QUERY_PARALLELISM` int +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"NAME\", + \"STATUS\", + \"QUERY_PARALLELISM\" +FROM + \"WM_RESOURCEPLAN\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@WM_RESOURCEPLANS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `WM_RESOURCEPLANS` ( + `NAME` string, + `STATUS` string, + `QUERY_PARALLELISM` int +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"NAME\", + \"STATUS\", + \"QUERY_PARALLELISM\" +FROM + \"WM_RESOURCEPLAN\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@WM_RESOURCEPLANS +POSTHOOK: Output: database:sys PREHOOK: query: DROP DATABASE IF EXISTS INFORMATION_SCHEMA PREHOOK: type: DROPDATABASE POSTHOOK: query: DROP DATABASE IF EXISTS INFORMATION_SCHEMA @@ -2967,7 +3005,7 @@ POSTHOOK: query: select count(*) from cds POSTHOOK: type: QUERY POSTHOOK: Input: sys@cds #### A masked pattern was here #### -65 +66 PREHOOK: query: select column_name, type_name, integer_idx from columns_v2 order by column_name, integer_idx limit 5 PREHOOK: type: QUERY PREHOOK: Input: sys@columns_v2 @@ -3121,7 +3159,7 @@ POSTHOOK: query: select count(*) from sds POSTHOOK: type: QUERY POSTHOOK: Input: sys@sds #### A masked pattern was here #### -71 +72 PREHOOK: query: select param_key, 
param_value from sd_params order by param_key, param_value limit 5 PREHOOK: type: QUERY PREHOOK: Input: sys@sd_params @@ -3347,7 +3385,7 @@ POSTHOOK: Input: sys@table_params POSTHOOK: Input: sys@table_stats_view #### A masked pattern was here #### {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} 0 0 0 0 -{"BASIC_STATS":"true","COLUMN_STATS":{"column_name":"true","create_time":"true","grant_option":"true","grantor":"true","grantor_type":"true","principal_name":"true","principal_type":"true","tbl_col_priv":"true","tbl_column_grant_id":"true","tbl_id":"true"}} 0 0 0 0 +{"BASIC_STATS":"true","COLUMN_STATS":{"name":"true","query_parallelism":"true","status":"true"}} 0 0 0 0 {"BASIC_STATS":"true","COLUMN_STATS":{"next_val":"true","sequence_name":"true"}} 0 0 0 0 {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} 0 0 0 0 #### A masked pattern was here #### @@ -3552,6 +3590,7 @@ default sys tbl_col_privs BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL default sys tbl_privs BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL default sys tbls BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL default sys version BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL +default sys wm_resourceplans BASE_TABLE NULL NULL NULL NULL NULL YES NO NULL PREHOOK: query: select * from TABLE_PRIVILEGES order by GRANTOR, GRANTEE, TABLE_SCHEMA, TABLE_NAME, PRIVILEGE_TYPE limit 10 PREHOOK: type: QUERY PREHOOK: Input: information_schema@table_privileges diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index 797ed84eba..878b6f0789 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t 
_size895; - ::apache::thrift::protocol::TType _etype898; - xfer += iprot->readListBegin(_etype898, _size895); - this->success.resize(_size895); - uint32_t _i899; - for (_i899 = 0; _i899 < _size895; ++_i899) + uint32_t _size903; + ::apache::thrift::protocol::TType _etype906; + xfer += iprot->readListBegin(_etype906, _size903); + this->success.resize(_size903); + uint32_t _i907; + for (_i907 = 0; _i907 < _size903; ++_i907) { - xfer += iprot->readString(this->success[_i899]); + xfer += iprot->readString(this->success[_i907]); } xfer += iprot->readListEnd(); } @@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter900; - for (_iter900 = this->success.begin(); _iter900 != this->success.end(); ++_iter900) + std::vector ::const_iterator _iter908; + for (_iter908 = this->success.begin(); _iter908 != this->success.end(); ++_iter908) { - xfer += oprot->writeString((*_iter900)); + xfer += oprot->writeString((*_iter908)); } xfer += oprot->writeListEnd(); } @@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size901; - ::apache::thrift::protocol::TType _etype904; - xfer += iprot->readListBegin(_etype904, _size901); - (*(this->success)).resize(_size901); - uint32_t _i905; - for (_i905 = 0; _i905 < _size901; ++_i905) + uint32_t _size909; + ::apache::thrift::protocol::TType _etype912; + xfer += iprot->readListBegin(_etype912, _size909); + (*(this->success)).resize(_size909); + uint32_t _i913; + for (_i913 = 0; _i913 < _size909; ++_i913) { - xfer += iprot->readString((*(this->success))[_i905]); + xfer += iprot->readString((*(this->success))[_i913]); } xfer += 
iprot->readListEnd(); } @@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size906; - ::apache::thrift::protocol::TType _etype909; - xfer += iprot->readListBegin(_etype909, _size906); - this->success.resize(_size906); - uint32_t _i910; - for (_i910 = 0; _i910 < _size906; ++_i910) + uint32_t _size914; + ::apache::thrift::protocol::TType _etype917; + xfer += iprot->readListBegin(_etype917, _size914); + this->success.resize(_size914); + uint32_t _i918; + for (_i918 = 0; _i918 < _size914; ++_i918) { - xfer += iprot->readString(this->success[_i910]); + xfer += iprot->readString(this->success[_i918]); } xfer += iprot->readListEnd(); } @@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter911; - for (_iter911 = this->success.begin(); _iter911 != this->success.end(); ++_iter911) + std::vector ::const_iterator _iter919; + for (_iter919 = this->success.begin(); _iter919 != this->success.end(); ++_iter919) { - xfer += oprot->writeString((*_iter911)); + xfer += oprot->writeString((*_iter919)); } xfer += oprot->writeListEnd(); } @@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size912; - ::apache::thrift::protocol::TType _etype915; - xfer += iprot->readListBegin(_etype915, _size912); - (*(this->success)).resize(_size912); - uint32_t _i916; - for (_i916 = 0; _i916 < _size912; ++_i916) + uint32_t _size920; + ::apache::thrift::protocol::TType _etype923; + xfer += iprot->readListBegin(_etype923, _size920); + 
(*(this->success)).resize(_size920); + uint32_t _i924; + for (_i924 = 0; _i924 < _size920; ++_i924) { - xfer += iprot->readString((*(this->success))[_i916]); + xfer += iprot->readString((*(this->success))[_i924]); } xfer += iprot->readListEnd(); } @@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size917; - ::apache::thrift::protocol::TType _ktype918; - ::apache::thrift::protocol::TType _vtype919; - xfer += iprot->readMapBegin(_ktype918, _vtype919, _size917); - uint32_t _i921; - for (_i921 = 0; _i921 < _size917; ++_i921) + uint32_t _size925; + ::apache::thrift::protocol::TType _ktype926; + ::apache::thrift::protocol::TType _vtype927; + xfer += iprot->readMapBegin(_ktype926, _vtype927, _size925); + uint32_t _i929; + for (_i929 = 0; _i929 < _size925; ++_i929) { - std::string _key922; - xfer += iprot->readString(_key922); - Type& _val923 = this->success[_key922]; - xfer += _val923.read(iprot); + std::string _key930; + xfer += iprot->readString(_key930); + Type& _val931 = this->success[_key930]; + xfer += _val931.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter924; - for (_iter924 = this->success.begin(); _iter924 != this->success.end(); ++_iter924) + std::map ::const_iterator _iter932; + for (_iter932 = this->success.begin(); _iter932 != this->success.end(); ++_iter932) { - xfer += oprot->writeString(_iter924->first); - xfer += _iter924->second.write(oprot); + xfer += oprot->writeString(_iter932->first); + xfer += _iter932->second.write(oprot); } xfer += oprot->writeMapEnd(); 
} @@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size925; - ::apache::thrift::protocol::TType _ktype926; - ::apache::thrift::protocol::TType _vtype927; - xfer += iprot->readMapBegin(_ktype926, _vtype927, _size925); - uint32_t _i929; - for (_i929 = 0; _i929 < _size925; ++_i929) + uint32_t _size933; + ::apache::thrift::protocol::TType _ktype934; + ::apache::thrift::protocol::TType _vtype935; + xfer += iprot->readMapBegin(_ktype934, _vtype935, _size933); + uint32_t _i937; + for (_i937 = 0; _i937 < _size933; ++_i937) { - std::string _key930; - xfer += iprot->readString(_key930); - Type& _val931 = (*(this->success))[_key930]; - xfer += _val931.read(iprot); + std::string _key938; + xfer += iprot->readString(_key938); + Type& _val939 = (*(this->success))[_key938]; + xfer += _val939.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size932; - ::apache::thrift::protocol::TType _etype935; - xfer += iprot->readListBegin(_etype935, _size932); - this->success.resize(_size932); - uint32_t _i936; - for (_i936 = 0; _i936 < _size932; ++_i936) + uint32_t _size940; + ::apache::thrift::protocol::TType _etype943; + xfer += iprot->readListBegin(_etype943, _size940); + this->success.resize(_size940); + uint32_t _i944; + for (_i944 = 0; _i944 < _size940; ++_i944) { - xfer += this->success[_i936].read(iprot); + xfer += this->success[_i944].read(iprot); } xfer += iprot->readListEnd(); } @@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, 
static_cast(this->success.size())); - std::vector ::const_iterator _iter937; - for (_iter937 = this->success.begin(); _iter937 != this->success.end(); ++_iter937) + std::vector ::const_iterator _iter945; + for (_iter945 = this->success.begin(); _iter945 != this->success.end(); ++_iter945) { - xfer += (*_iter937).write(oprot); + xfer += (*_iter945).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size938; - ::apache::thrift::protocol::TType _etype941; - xfer += iprot->readListBegin(_etype941, _size938); - (*(this->success)).resize(_size938); - uint32_t _i942; - for (_i942 = 0; _i942 < _size938; ++_i942) + uint32_t _size946; + ::apache::thrift::protocol::TType _etype949; + xfer += iprot->readListBegin(_etype949, _size946); + (*(this->success)).resize(_size946); + uint32_t _i950; + for (_i950 = 0; _i950 < _size946; ++_i950) { - xfer += (*(this->success))[_i942].read(iprot); + xfer += (*(this->success))[_i950].read(iprot); } xfer += iprot->readListEnd(); } @@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size943; - ::apache::thrift::protocol::TType _etype946; - xfer += iprot->readListBegin(_etype946, _size943); - this->success.resize(_size943); - uint32_t _i947; - for (_i947 = 0; _i947 < _size943; ++_i947) + uint32_t _size951; + ::apache::thrift::protocol::TType _etype954; + xfer += iprot->readListBegin(_etype954, _size951); + this->success.resize(_size951); + uint32_t _i955; + for (_i955 = 0; _i955 < _size951; ++_i955) { - xfer += this->success[_i947].read(iprot); + xfer += this->success[_i955].read(iprot); } xfer += iprot->readListEnd(); } @@ -3256,10 +3256,10 @@ uint32_t 
ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter948; - for (_iter948 = this->success.begin(); _iter948 != this->success.end(); ++_iter948) + std::vector ::const_iterator _iter956; + for (_iter956 = this->success.begin(); _iter956 != this->success.end(); ++_iter956) { - xfer += (*_iter948).write(oprot); + xfer += (*_iter956).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size949; - ::apache::thrift::protocol::TType _etype952; - xfer += iprot->readListBegin(_etype952, _size949); - (*(this->success)).resize(_size949); - uint32_t _i953; - for (_i953 = 0; _i953 < _size949; ++_i953) + uint32_t _size957; + ::apache::thrift::protocol::TType _etype960; + xfer += iprot->readListBegin(_etype960, _size957); + (*(this->success)).resize(_size957); + uint32_t _i961; + for (_i961 = 0; _i961 < _size957; ++_i961) { - xfer += (*(this->success))[_i953].read(iprot); + xfer += (*(this->success))[_i961].read(iprot); } xfer += iprot->readListEnd(); } @@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size954; - ::apache::thrift::protocol::TType _etype957; - xfer += iprot->readListBegin(_etype957, _size954); - this->success.resize(_size954); - uint32_t _i958; - for (_i958 = 0; _i958 < _size954; ++_i958) + uint32_t _size962; + ::apache::thrift::protocol::TType _etype965; + xfer += iprot->readListBegin(_etype965, _size962); + this->success.resize(_size962); + uint32_t _i966; + for (_i966 = 0; _i966 < _size962; 
++_i966) { - xfer += this->success[_i958].read(iprot); + xfer += this->success[_i966].read(iprot); } xfer += iprot->readListEnd(); } @@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter959; - for (_iter959 = this->success.begin(); _iter959 != this->success.end(); ++_iter959) + std::vector ::const_iterator _iter967; + for (_iter967 = this->success.begin(); _iter967 != this->success.end(); ++_iter967) { - xfer += (*_iter959).write(oprot); + xfer += (*_iter967).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size960; - ::apache::thrift::protocol::TType _etype963; - xfer += iprot->readListBegin(_etype963, _size960); - (*(this->success)).resize(_size960); - uint32_t _i964; - for (_i964 = 0; _i964 < _size960; ++_i964) + uint32_t _size968; + ::apache::thrift::protocol::TType _etype971; + xfer += iprot->readListBegin(_etype971, _size968); + (*(this->success)).resize(_size968); + uint32_t _i972; + for (_i972 = 0; _i972 < _size968; ++_i972) { - xfer += (*(this->success))[_i964].read(iprot); + xfer += (*(this->success))[_i972].read(iprot); } xfer += iprot->readListEnd(); } @@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size965; - ::apache::thrift::protocol::TType _etype968; - xfer += iprot->readListBegin(_etype968, _size965); - this->success.resize(_size965); - uint32_t _i969; - for (_i969 = 0; _i969 < _size965; ++_i969) + uint32_t _size973; + 
::apache::thrift::protocol::TType _etype976; + xfer += iprot->readListBegin(_etype976, _size973); + this->success.resize(_size973); + uint32_t _i977; + for (_i977 = 0; _i977 < _size973; ++_i977) { - xfer += this->success[_i969].read(iprot); + xfer += this->success[_i977].read(iprot); } xfer += iprot->readListEnd(); } @@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter970; - for (_iter970 = this->success.begin(); _iter970 != this->success.end(); ++_iter970) + std::vector ::const_iterator _iter978; + for (_iter978 = this->success.begin(); _iter978 != this->success.end(); ++_iter978) { - xfer += (*_iter970).write(oprot); + xfer += (*_iter978).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size971; - ::apache::thrift::protocol::TType _etype974; - xfer += iprot->readListBegin(_etype974, _size971); - (*(this->success)).resize(_size971); - uint32_t _i975; - for (_i975 = 0; _i975 < _size971; ++_i975) + uint32_t _size979; + ::apache::thrift::protocol::TType _etype982; + xfer += iprot->readListBegin(_etype982, _size979); + (*(this->success)).resize(_size979); + uint32_t _i983; + for (_i983 = 0; _i983 < _size979; ++_i983) { - xfer += (*(this->success))[_i975].read(iprot); + xfer += (*(this->success))[_i983].read(iprot); } xfer += iprot->readListEnd(); } @@ -4518,14 +4518,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size976; - ::apache::thrift::protocol::TType _etype979; - 
xfer += iprot->readListBegin(_etype979, _size976); - this->primaryKeys.resize(_size976); - uint32_t _i980; - for (_i980 = 0; _i980 < _size976; ++_i980) + uint32_t _size984; + ::apache::thrift::protocol::TType _etype987; + xfer += iprot->readListBegin(_etype987, _size984); + this->primaryKeys.resize(_size984); + uint32_t _i988; + for (_i988 = 0; _i988 < _size984; ++_i988) { - xfer += this->primaryKeys[_i980].read(iprot); + xfer += this->primaryKeys[_i988].read(iprot); } xfer += iprot->readListEnd(); } @@ -4538,14 +4538,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size981; - ::apache::thrift::protocol::TType _etype984; - xfer += iprot->readListBegin(_etype984, _size981); - this->foreignKeys.resize(_size981); - uint32_t _i985; - for (_i985 = 0; _i985 < _size981; ++_i985) + uint32_t _size989; + ::apache::thrift::protocol::TType _etype992; + xfer += iprot->readListBegin(_etype992, _size989); + this->foreignKeys.resize(_size989); + uint32_t _i993; + for (_i993 = 0; _i993 < _size989; ++_i993) { - xfer += this->foreignKeys[_i985].read(iprot); + xfer += this->foreignKeys[_i993].read(iprot); } xfer += iprot->readListEnd(); } @@ -4558,14 +4558,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraints.clear(); - uint32_t _size986; - ::apache::thrift::protocol::TType _etype989; - xfer += iprot->readListBegin(_etype989, _size986); - this->uniqueConstraints.resize(_size986); - uint32_t _i990; - for (_i990 = 0; _i990 < _size986; ++_i990) + uint32_t _size994; + ::apache::thrift::protocol::TType _etype997; + xfer += iprot->readListBegin(_etype997, _size994); + this->uniqueConstraints.resize(_size994); + uint32_t _i998; + for (_i998 = 0; _i998 < _size994; ++_i998) { - xfer += this->uniqueConstraints[_i990].read(iprot); + xfer += 
this->uniqueConstraints[_i998].read(iprot); } xfer += iprot->readListEnd(); } @@ -4578,14 +4578,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->notNullConstraints.clear(); - uint32_t _size991; - ::apache::thrift::protocol::TType _etype994; - xfer += iprot->readListBegin(_etype994, _size991); - this->notNullConstraints.resize(_size991); - uint32_t _i995; - for (_i995 = 0; _i995 < _size991; ++_i995) + uint32_t _size999; + ::apache::thrift::protocol::TType _etype1002; + xfer += iprot->readListBegin(_etype1002, _size999); + this->notNullConstraints.resize(_size999); + uint32_t _i1003; + for (_i1003 = 0; _i1003 < _size999; ++_i1003) { - xfer += this->notNullConstraints[_i995].read(iprot); + xfer += this->notNullConstraints[_i1003].read(iprot); } xfer += iprot->readListEnd(); } @@ -4618,10 +4618,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter996; - for (_iter996 = this->primaryKeys.begin(); _iter996 != this->primaryKeys.end(); ++_iter996) + std::vector ::const_iterator _iter1004; + for (_iter1004 = this->primaryKeys.begin(); _iter1004 != this->primaryKeys.end(); ++_iter1004) { - xfer += (*_iter996).write(oprot); + xfer += (*_iter1004).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4630,10 +4630,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter997; - for (_iter997 = this->foreignKeys.begin(); _iter997 != this->foreignKeys.end(); 
++_iter997) + std::vector ::const_iterator _iter1005; + for (_iter1005 = this->foreignKeys.begin(); _iter1005 != this->foreignKeys.end(); ++_iter1005) { - xfer += (*_iter997).write(oprot); + xfer += (*_iter1005).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4642,10 +4642,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraints.size())); - std::vector ::const_iterator _iter998; - for (_iter998 = this->uniqueConstraints.begin(); _iter998 != this->uniqueConstraints.end(); ++_iter998) + std::vector ::const_iterator _iter1006; + for (_iter1006 = this->uniqueConstraints.begin(); _iter1006 != this->uniqueConstraints.end(); ++_iter1006) { - xfer += (*_iter998).write(oprot); + xfer += (*_iter1006).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4654,10 +4654,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraints.size())); - std::vector ::const_iterator _iter999; - for (_iter999 = this->notNullConstraints.begin(); _iter999 != this->notNullConstraints.end(); ++_iter999) + std::vector ::const_iterator _iter1007; + for (_iter1007 = this->notNullConstraints.begin(); _iter1007 != this->notNullConstraints.end(); ++_iter1007) { - xfer += (*_iter999).write(oprot); + xfer += (*_iter1007).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4685,10 +4685,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, 
static_cast((*(this->primaryKeys)).size())); - std::vector ::const_iterator _iter1000; - for (_iter1000 = (*(this->primaryKeys)).begin(); _iter1000 != (*(this->primaryKeys)).end(); ++_iter1000) + std::vector ::const_iterator _iter1008; + for (_iter1008 = (*(this->primaryKeys)).begin(); _iter1008 != (*(this->primaryKeys)).end(); ++_iter1008) { - xfer += (*_iter1000).write(oprot); + xfer += (*_iter1008).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4697,10 +4697,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->foreignKeys)).size())); - std::vector ::const_iterator _iter1001; - for (_iter1001 = (*(this->foreignKeys)).begin(); _iter1001 != (*(this->foreignKeys)).end(); ++_iter1001) + std::vector ::const_iterator _iter1009; + for (_iter1009 = (*(this->foreignKeys)).begin(); _iter1009 != (*(this->foreignKeys)).end(); ++_iter1009) { - xfer += (*_iter1001).write(oprot); + xfer += (*_iter1009).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4709,10 +4709,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->uniqueConstraints)).size())); - std::vector ::const_iterator _iter1002; - for (_iter1002 = (*(this->uniqueConstraints)).begin(); _iter1002 != (*(this->uniqueConstraints)).end(); ++_iter1002) + std::vector ::const_iterator _iter1010; + for (_iter1010 = (*(this->uniqueConstraints)).begin(); _iter1010 != (*(this->uniqueConstraints)).end(); ++_iter1010) { - xfer += (*_iter1002).write(oprot); + xfer += (*_iter1010).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4721,10 +4721,10 @@ uint32_t 
ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->notNullConstraints)).size())); - std::vector ::const_iterator _iter1003; - for (_iter1003 = (*(this->notNullConstraints)).begin(); _iter1003 != (*(this->notNullConstraints)).end(); ++_iter1003) + std::vector ::const_iterator _iter1011; + for (_iter1011 = (*(this->notNullConstraints)).begin(); _iter1011 != (*(this->notNullConstraints)).end(); ++_iter1011) { - xfer += (*_iter1003).write(oprot); + xfer += (*_iter1011).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6478,14 +6478,14 @@ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size1004; - ::apache::thrift::protocol::TType _etype1007; - xfer += iprot->readListBegin(_etype1007, _size1004); - this->partNames.resize(_size1004); - uint32_t _i1008; - for (_i1008 = 0; _i1008 < _size1004; ++_i1008) + uint32_t _size1012; + ::apache::thrift::protocol::TType _etype1015; + xfer += iprot->readListBegin(_etype1015, _size1012); + this->partNames.resize(_size1012); + uint32_t _i1016; + for (_i1016 = 0; _i1016 < _size1012; ++_i1016) { - xfer += iprot->readString(this->partNames[_i1008]); + xfer += iprot->readString(this->partNames[_i1016]); } xfer += iprot->readListEnd(); } @@ -6522,10 +6522,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); - std::vector ::const_iterator _iter1009; - for (_iter1009 = this->partNames.begin(); _iter1009 != this->partNames.end(); ++_iter1009) + std::vector ::const_iterator _iter1017; + for 
(_iter1017 = this->partNames.begin(); _iter1017 != this->partNames.end(); ++_iter1017) { - xfer += oprot->writeString((*_iter1009)); + xfer += oprot->writeString((*_iter1017)); } xfer += oprot->writeListEnd(); } @@ -6557,10 +6557,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->partNames)).size())); - std::vector ::const_iterator _iter1010; - for (_iter1010 = (*(this->partNames)).begin(); _iter1010 != (*(this->partNames)).end(); ++_iter1010) + std::vector ::const_iterator _iter1018; + for (_iter1018 = (*(this->partNames)).begin(); _iter1018 != (*(this->partNames)).end(); ++_iter1018) { - xfer += oprot->writeString((*_iter1010)); + xfer += oprot->writeString((*_iter1018)); } xfer += oprot->writeListEnd(); } @@ -6804,14 +6804,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1011; - ::apache::thrift::protocol::TType _etype1014; - xfer += iprot->readListBegin(_etype1014, _size1011); - this->success.resize(_size1011); - uint32_t _i1015; - for (_i1015 = 0; _i1015 < _size1011; ++_i1015) + uint32_t _size1019; + ::apache::thrift::protocol::TType _etype1022; + xfer += iprot->readListBegin(_etype1022, _size1019); + this->success.resize(_size1019); + uint32_t _i1023; + for (_i1023 = 0; _i1023 < _size1019; ++_i1023) { - xfer += iprot->readString(this->success[_i1015]); + xfer += iprot->readString(this->success[_i1023]); } xfer += iprot->readListEnd(); } @@ -6850,10 +6850,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); 
- std::vector ::const_iterator _iter1016; - for (_iter1016 = this->success.begin(); _iter1016 != this->success.end(); ++_iter1016) + std::vector ::const_iterator _iter1024; + for (_iter1024 = this->success.begin(); _iter1024 != this->success.end(); ++_iter1024) { - xfer += oprot->writeString((*_iter1016)); + xfer += oprot->writeString((*_iter1024)); } xfer += oprot->writeListEnd(); } @@ -6898,14 +6898,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1017; - ::apache::thrift::protocol::TType _etype1020; - xfer += iprot->readListBegin(_etype1020, _size1017); - (*(this->success)).resize(_size1017); - uint32_t _i1021; - for (_i1021 = 0; _i1021 < _size1017; ++_i1021) + uint32_t _size1025; + ::apache::thrift::protocol::TType _etype1028; + xfer += iprot->readListBegin(_etype1028, _size1025); + (*(this->success)).resize(_size1025); + uint32_t _i1029; + for (_i1029 = 0; _i1029 < _size1025; ++_i1029) { - xfer += iprot->readString((*(this->success))[_i1021]); + xfer += iprot->readString((*(this->success))[_i1029]); } xfer += iprot->readListEnd(); } @@ -7075,14 +7075,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1022; - ::apache::thrift::protocol::TType _etype1025; - xfer += iprot->readListBegin(_etype1025, _size1022); - this->success.resize(_size1022); - uint32_t _i1026; - for (_i1026 = 0; _i1026 < _size1022; ++_i1026) + uint32_t _size1030; + ::apache::thrift::protocol::TType _etype1033; + xfer += iprot->readListBegin(_etype1033, _size1030); + this->success.resize(_size1030); + uint32_t _i1034; + for (_i1034 = 0; _i1034 < _size1030; ++_i1034) { - xfer += iprot->readString(this->success[_i1026]); + xfer += iprot->readString(this->success[_i1034]); } xfer += iprot->readListEnd(); } @@ -7121,10 +7121,10 @@ uint32_t 
ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift:: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1027; - for (_iter1027 = this->success.begin(); _iter1027 != this->success.end(); ++_iter1027) + std::vector ::const_iterator _iter1035; + for (_iter1035 = this->success.begin(); _iter1035 != this->success.end(); ++_iter1035) { - xfer += oprot->writeString((*_iter1027)); + xfer += oprot->writeString((*_iter1035)); } xfer += oprot->writeListEnd(); } @@ -7169,14 +7169,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1028; - ::apache::thrift::protocol::TType _etype1031; - xfer += iprot->readListBegin(_etype1031, _size1028); - (*(this->success)).resize(_size1028); - uint32_t _i1032; - for (_i1032 = 0; _i1032 < _size1028; ++_i1032) + uint32_t _size1036; + ::apache::thrift::protocol::TType _etype1039; + xfer += iprot->readListBegin(_etype1039, _size1036); + (*(this->success)).resize(_size1036); + uint32_t _i1040; + for (_i1040 = 0; _i1040 < _size1036; ++_i1040) { - xfer += iprot->readString((*(this->success))[_i1032]); + xfer += iprot->readString((*(this->success))[_i1040]); } xfer += iprot->readListEnd(); } @@ -7251,14 +7251,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_types.clear(); - uint32_t _size1033; - ::apache::thrift::protocol::TType _etype1036; - xfer += iprot->readListBegin(_etype1036, _size1033); - this->tbl_types.resize(_size1033); - uint32_t _i1037; - for (_i1037 = 0; _i1037 < _size1033; ++_i1037) + uint32_t _size1041; + ::apache::thrift::protocol::TType _etype1044; + xfer += iprot->readListBegin(_etype1044, _size1041); + 
this->tbl_types.resize(_size1041); + uint32_t _i1045; + for (_i1045 = 0; _i1045 < _size1041; ++_i1045) { - xfer += iprot->readString(this->tbl_types[_i1037]); + xfer += iprot->readString(this->tbl_types[_i1045]); } xfer += iprot->readListEnd(); } @@ -7295,10 +7295,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_types.size())); - std::vector ::const_iterator _iter1038; - for (_iter1038 = this->tbl_types.begin(); _iter1038 != this->tbl_types.end(); ++_iter1038) + std::vector ::const_iterator _iter1046; + for (_iter1046 = this->tbl_types.begin(); _iter1046 != this->tbl_types.end(); ++_iter1046) { - xfer += oprot->writeString((*_iter1038)); + xfer += oprot->writeString((*_iter1046)); } xfer += oprot->writeListEnd(); } @@ -7330,10 +7330,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_types)).size())); - std::vector ::const_iterator _iter1039; - for (_iter1039 = (*(this->tbl_types)).begin(); _iter1039 != (*(this->tbl_types)).end(); ++_iter1039) + std::vector ::const_iterator _iter1047; + for (_iter1047 = (*(this->tbl_types)).begin(); _iter1047 != (*(this->tbl_types)).end(); ++_iter1047) { - xfer += oprot->writeString((*_iter1039)); + xfer += oprot->writeString((*_iter1047)); } xfer += oprot->writeListEnd(); } @@ -7374,14 +7374,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1040; - ::apache::thrift::protocol::TType _etype1043; - xfer += iprot->readListBegin(_etype1043, _size1040); - this->success.resize(_size1040); - 
uint32_t _i1044; - for (_i1044 = 0; _i1044 < _size1040; ++_i1044) + uint32_t _size1048; + ::apache::thrift::protocol::TType _etype1051; + xfer += iprot->readListBegin(_etype1051, _size1048); + this->success.resize(_size1048); + uint32_t _i1052; + for (_i1052 = 0; _i1052 < _size1048; ++_i1052) { - xfer += this->success[_i1044].read(iprot); + xfer += this->success[_i1052].read(iprot); } xfer += iprot->readListEnd(); } @@ -7420,10 +7420,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1045; - for (_iter1045 = this->success.begin(); _iter1045 != this->success.end(); ++_iter1045) + std::vector ::const_iterator _iter1053; + for (_iter1053 = this->success.begin(); _iter1053 != this->success.end(); ++_iter1053) { - xfer += (*_iter1045).write(oprot); + xfer += (*_iter1053).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7468,14 +7468,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1046; - ::apache::thrift::protocol::TType _etype1049; - xfer += iprot->readListBegin(_etype1049, _size1046); - (*(this->success)).resize(_size1046); - uint32_t _i1050; - for (_i1050 = 0; _i1050 < _size1046; ++_i1050) + uint32_t _size1054; + ::apache::thrift::protocol::TType _etype1057; + xfer += iprot->readListBegin(_etype1057, _size1054); + (*(this->success)).resize(_size1054); + uint32_t _i1058; + for (_i1058 = 0; _i1058 < _size1054; ++_i1058) { - xfer += (*(this->success))[_i1050].read(iprot); + xfer += (*(this->success))[_i1058].read(iprot); } xfer += iprot->readListEnd(); } @@ -7613,14 +7613,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == 
::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1051; - ::apache::thrift::protocol::TType _etype1054; - xfer += iprot->readListBegin(_etype1054, _size1051); - this->success.resize(_size1051); - uint32_t _i1055; - for (_i1055 = 0; _i1055 < _size1051; ++_i1055) + uint32_t _size1059; + ::apache::thrift::protocol::TType _etype1062; + xfer += iprot->readListBegin(_etype1062, _size1059); + this->success.resize(_size1059); + uint32_t _i1063; + for (_i1063 = 0; _i1063 < _size1059; ++_i1063) { - xfer += iprot->readString(this->success[_i1055]); + xfer += iprot->readString(this->success[_i1063]); } xfer += iprot->readListEnd(); } @@ -7659,10 +7659,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1056; - for (_iter1056 = this->success.begin(); _iter1056 != this->success.end(); ++_iter1056) + std::vector ::const_iterator _iter1064; + for (_iter1064 = this->success.begin(); _iter1064 != this->success.end(); ++_iter1064) { - xfer += oprot->writeString((*_iter1056)); + xfer += oprot->writeString((*_iter1064)); } xfer += oprot->writeListEnd(); } @@ -7707,14 +7707,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1057; - ::apache::thrift::protocol::TType _etype1060; - xfer += iprot->readListBegin(_etype1060, _size1057); - (*(this->success)).resize(_size1057); - uint32_t _i1061; - for (_i1061 = 0; _i1061 < _size1057; ++_i1061) + uint32_t _size1065; + ::apache::thrift::protocol::TType _etype1068; + xfer += iprot->readListBegin(_etype1068, _size1065); + (*(this->success)).resize(_size1065); + uint32_t _i1069; + for (_i1069 = 0; _i1069 < _size1065; ++_i1069) { - xfer 
+= iprot->readString((*(this->success))[_i1061]); + xfer += iprot->readString((*(this->success))[_i1069]); } xfer += iprot->readListEnd(); } @@ -8024,14 +8024,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size1062; - ::apache::thrift::protocol::TType _etype1065; - xfer += iprot->readListBegin(_etype1065, _size1062); - this->tbl_names.resize(_size1062); - uint32_t _i1066; - for (_i1066 = 0; _i1066 < _size1062; ++_i1066) + uint32_t _size1070; + ::apache::thrift::protocol::TType _etype1073; + xfer += iprot->readListBegin(_etype1073, _size1070); + this->tbl_names.resize(_size1070); + uint32_t _i1074; + for (_i1074 = 0; _i1074 < _size1070; ++_i1074) { - xfer += iprot->readString(this->tbl_names[_i1066]); + xfer += iprot->readString(this->tbl_names[_i1074]); } xfer += iprot->readListEnd(); } @@ -8064,10 +8064,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter1067; - for (_iter1067 = this->tbl_names.begin(); _iter1067 != this->tbl_names.end(); ++_iter1067) + std::vector ::const_iterator _iter1075; + for (_iter1075 = this->tbl_names.begin(); _iter1075 != this->tbl_names.end(); ++_iter1075) { - xfer += oprot->writeString((*_iter1067)); + xfer += oprot->writeString((*_iter1075)); } xfer += oprot->writeListEnd(); } @@ -8095,10 +8095,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter1068; - for (_iter1068 = 
(*(this->tbl_names)).begin(); _iter1068 != (*(this->tbl_names)).end(); ++_iter1068) + std::vector ::const_iterator _iter1076; + for (_iter1076 = (*(this->tbl_names)).begin(); _iter1076 != (*(this->tbl_names)).end(); ++_iter1076) { - xfer += oprot->writeString((*_iter1068)); + xfer += oprot->writeString((*_iter1076)); } xfer += oprot->writeListEnd(); } @@ -8139,14 +8139,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1069; - ::apache::thrift::protocol::TType _etype1072; - xfer += iprot->readListBegin(_etype1072, _size1069); - this->success.resize(_size1069); - uint32_t _i1073; - for (_i1073 = 0; _i1073 < _size1069; ++_i1073) + uint32_t _size1077; + ::apache::thrift::protocol::TType _etype1080; + xfer += iprot->readListBegin(_etype1080, _size1077); + this->success.resize(_size1077); + uint32_t _i1081; + for (_i1081 = 0; _i1081 < _size1077; ++_i1081) { - xfer += this->success[_i1073].read(iprot); + xfer += this->success[_i1081].read(iprot); } xfer += iprot->readListEnd(); } @@ -8177,10 +8177,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1074; - for (_iter1074 = this->success.begin(); _iter1074 != this->success.end(); ++_iter1074) + std::vector
::const_iterator _iter1082; + for (_iter1082 = this->success.begin(); _iter1082 != this->success.end(); ++_iter1082) { - xfer += (*_iter1074).write(oprot); + xfer += (*_iter1082).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8221,14 +8221,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1075; - ::apache::thrift::protocol::TType _etype1078; - xfer += iprot->readListBegin(_etype1078, _size1075); - (*(this->success)).resize(_size1075); - uint32_t _i1079; - for (_i1079 = 0; _i1079 < _size1075; ++_i1079) + uint32_t _size1083; + ::apache::thrift::protocol::TType _etype1086; + xfer += iprot->readListBegin(_etype1086, _size1083); + (*(this->success)).resize(_size1083); + uint32_t _i1087; + for (_i1087 = 0; _i1087 < _size1083; ++_i1087) { - xfer += (*(this->success))[_i1079].read(iprot); + xfer += (*(this->success))[_i1087].read(iprot); } xfer += iprot->readListEnd(); } @@ -8864,14 +8864,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1080; - ::apache::thrift::protocol::TType _etype1083; - xfer += iprot->readListBegin(_etype1083, _size1080); - this->success.resize(_size1080); - uint32_t _i1084; - for (_i1084 = 0; _i1084 < _size1080; ++_i1084) + uint32_t _size1088; + ::apache::thrift::protocol::TType _etype1091; + xfer += iprot->readListBegin(_etype1091, _size1088); + this->success.resize(_size1088); + uint32_t _i1092; + for (_i1092 = 0; _i1092 < _size1088; ++_i1092) { - xfer += iprot->readString(this->success[_i1084]); + xfer += iprot->readString(this->success[_i1092]); } xfer += iprot->readListEnd(); } @@ -8926,10 +8926,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1085; - for (_iter1085 = this->success.begin(); _iter1085 != this->success.end(); ++_iter1085) + std::vector ::const_iterator _iter1093; + for (_iter1093 = this->success.begin(); _iter1093 != this->success.end(); ++_iter1093) { - xfer += oprot->writeString((*_iter1085)); + xfer += oprot->writeString((*_iter1093)); } xfer += oprot->writeListEnd(); } @@ -8982,14 +8982,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1086; - ::apache::thrift::protocol::TType _etype1089; - xfer += iprot->readListBegin(_etype1089, _size1086); - (*(this->success)).resize(_size1086); - uint32_t _i1090; - for (_i1090 = 0; _i1090 < _size1086; ++_i1090) + uint32_t _size1094; + ::apache::thrift::protocol::TType _etype1097; + xfer += iprot->readListBegin(_etype1097, _size1094); + (*(this->success)).resize(_size1094); + uint32_t _i1098; + for (_i1098 = 0; _i1098 < _size1094; ++_i1098) { - xfer += iprot->readString((*(this->success))[_i1090]); + xfer += iprot->readString((*(this->success))[_i1098]); } xfer += iprot->readListEnd(); } @@ -10323,14 +10323,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1091; - ::apache::thrift::protocol::TType _etype1094; - xfer += iprot->readListBegin(_etype1094, _size1091); - this->new_parts.resize(_size1091); - uint32_t _i1095; - for (_i1095 = 0; _i1095 < _size1091; ++_i1095) + uint32_t _size1099; + ::apache::thrift::protocol::TType _etype1102; + xfer += iprot->readListBegin(_etype1102, _size1099); + this->new_parts.resize(_size1099); + uint32_t _i1103; + for (_i1103 = 0; _i1103 < _size1099; ++_i1103) { - xfer += this->new_parts[_i1095].read(iprot); + xfer += 
this->new_parts[_i1103].read(iprot); } xfer += iprot->readListEnd(); } @@ -10359,10 +10359,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1096; - for (_iter1096 = this->new_parts.begin(); _iter1096 != this->new_parts.end(); ++_iter1096) + std::vector ::const_iterator _iter1104; + for (_iter1104 = this->new_parts.begin(); _iter1104 != this->new_parts.end(); ++_iter1104) { - xfer += (*_iter1096).write(oprot); + xfer += (*_iter1104).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10386,10 +10386,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1097; - for (_iter1097 = (*(this->new_parts)).begin(); _iter1097 != (*(this->new_parts)).end(); ++_iter1097) + std::vector ::const_iterator _iter1105; + for (_iter1105 = (*(this->new_parts)).begin(); _iter1105 != (*(this->new_parts)).end(); ++_iter1105) { - xfer += (*_iter1097).write(oprot); + xfer += (*_iter1105).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10598,14 +10598,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1098; - ::apache::thrift::protocol::TType _etype1101; - xfer += iprot->readListBegin(_etype1101, _size1098); - this->new_parts.resize(_size1098); - uint32_t _i1102; - for (_i1102 = 0; _i1102 < _size1098; ++_i1102) + uint32_t _size1106; + ::apache::thrift::protocol::TType _etype1109; + xfer += iprot->readListBegin(_etype1109, _size1106); + 
this->new_parts.resize(_size1106); + uint32_t _i1110; + for (_i1110 = 0; _i1110 < _size1106; ++_i1110) { - xfer += this->new_parts[_i1102].read(iprot); + xfer += this->new_parts[_i1110].read(iprot); } xfer += iprot->readListEnd(); } @@ -10634,10 +10634,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift:: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1103; - for (_iter1103 = this->new_parts.begin(); _iter1103 != this->new_parts.end(); ++_iter1103) + std::vector ::const_iterator _iter1111; + for (_iter1111 = this->new_parts.begin(); _iter1111 != this->new_parts.end(); ++_iter1111) { - xfer += (*_iter1103).write(oprot); + xfer += (*_iter1111).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10661,10 +10661,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1104; - for (_iter1104 = (*(this->new_parts)).begin(); _iter1104 != (*(this->new_parts)).end(); ++_iter1104) + std::vector ::const_iterator _iter1112; + for (_iter1112 = (*(this->new_parts)).begin(); _iter1112 != (*(this->new_parts)).end(); ++_iter1112) { - xfer += (*_iter1104).write(oprot); + xfer += (*_iter1112).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10889,14 +10889,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1105; - ::apache::thrift::protocol::TType _etype1108; - xfer += iprot->readListBegin(_etype1108, _size1105); - this->part_vals.resize(_size1105); - uint32_t _i1109; - for (_i1109 = 
0; _i1109 < _size1105; ++_i1109) + uint32_t _size1113; + ::apache::thrift::protocol::TType _etype1116; + xfer += iprot->readListBegin(_etype1116, _size1113); + this->part_vals.resize(_size1113); + uint32_t _i1117; + for (_i1117 = 0; _i1117 < _size1113; ++_i1117) { - xfer += iprot->readString(this->part_vals[_i1109]); + xfer += iprot->readString(this->part_vals[_i1117]); } xfer += iprot->readListEnd(); } @@ -10933,10 +10933,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1110; - for (_iter1110 = this->part_vals.begin(); _iter1110 != this->part_vals.end(); ++_iter1110) + std::vector ::const_iterator _iter1118; + for (_iter1118 = this->part_vals.begin(); _iter1118 != this->part_vals.end(); ++_iter1118) { - xfer += oprot->writeString((*_iter1110)); + xfer += oprot->writeString((*_iter1118)); } xfer += oprot->writeListEnd(); } @@ -10968,10 +10968,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1111; - for (_iter1111 = (*(this->part_vals)).begin(); _iter1111 != (*(this->part_vals)).end(); ++_iter1111) + std::vector ::const_iterator _iter1119; + for (_iter1119 = (*(this->part_vals)).begin(); _iter1119 != (*(this->part_vals)).end(); ++_iter1119) { - xfer += oprot->writeString((*_iter1111)); + xfer += oprot->writeString((*_iter1119)); } xfer += oprot->writeListEnd(); } @@ -11443,14 +11443,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { 
this->part_vals.clear(); - uint32_t _size1112; - ::apache::thrift::protocol::TType _etype1115; - xfer += iprot->readListBegin(_etype1115, _size1112); - this->part_vals.resize(_size1112); - uint32_t _i1116; - for (_i1116 = 0; _i1116 < _size1112; ++_i1116) + uint32_t _size1120; + ::apache::thrift::protocol::TType _etype1123; + xfer += iprot->readListBegin(_etype1123, _size1120); + this->part_vals.resize(_size1120); + uint32_t _i1124; + for (_i1124 = 0; _i1124 < _size1120; ++_i1124) { - xfer += iprot->readString(this->part_vals[_i1116]); + xfer += iprot->readString(this->part_vals[_i1124]); } xfer += iprot->readListEnd(); } @@ -11495,10 +11495,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1117; - for (_iter1117 = this->part_vals.begin(); _iter1117 != this->part_vals.end(); ++_iter1117) + std::vector ::const_iterator _iter1125; + for (_iter1125 = this->part_vals.begin(); _iter1125 != this->part_vals.end(); ++_iter1125) { - xfer += oprot->writeString((*_iter1117)); + xfer += oprot->writeString((*_iter1125)); } xfer += oprot->writeListEnd(); } @@ -11534,10 +11534,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1118; - for (_iter1118 = (*(this->part_vals)).begin(); _iter1118 != (*(this->part_vals)).end(); ++_iter1118) + std::vector ::const_iterator _iter1126; + for (_iter1126 = (*(this->part_vals)).begin(); _iter1126 != (*(this->part_vals)).end(); ++_iter1126) { - xfer += oprot->writeString((*_iter1118)); + xfer += 
oprot->writeString((*_iter1126)); } xfer += oprot->writeListEnd(); } @@ -12340,14 +12340,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1119; - ::apache::thrift::protocol::TType _etype1122; - xfer += iprot->readListBegin(_etype1122, _size1119); - this->part_vals.resize(_size1119); - uint32_t _i1123; - for (_i1123 = 0; _i1123 < _size1119; ++_i1123) + uint32_t _size1127; + ::apache::thrift::protocol::TType _etype1130; + xfer += iprot->readListBegin(_etype1130, _size1127); + this->part_vals.resize(_size1127); + uint32_t _i1131; + for (_i1131 = 0; _i1131 < _size1127; ++_i1131) { - xfer += iprot->readString(this->part_vals[_i1123]); + xfer += iprot->readString(this->part_vals[_i1131]); } xfer += iprot->readListEnd(); } @@ -12392,10 +12392,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1124; - for (_iter1124 = this->part_vals.begin(); _iter1124 != this->part_vals.end(); ++_iter1124) + std::vector ::const_iterator _iter1132; + for (_iter1132 = this->part_vals.begin(); _iter1132 != this->part_vals.end(); ++_iter1132) { - xfer += oprot->writeString((*_iter1124)); + xfer += oprot->writeString((*_iter1132)); } xfer += oprot->writeListEnd(); } @@ -12431,10 +12431,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1125; - for (_iter1125 = (*(this->part_vals)).begin(); _iter1125 != (*(this->part_vals)).end(); 
++_iter1125) + std::vector ::const_iterator _iter1133; + for (_iter1133 = (*(this->part_vals)).begin(); _iter1133 != (*(this->part_vals)).end(); ++_iter1133) { - xfer += oprot->writeString((*_iter1125)); + xfer += oprot->writeString((*_iter1133)); } xfer += oprot->writeListEnd(); } @@ -12643,14 +12643,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1126; - ::apache::thrift::protocol::TType _etype1129; - xfer += iprot->readListBegin(_etype1129, _size1126); - this->part_vals.resize(_size1126); - uint32_t _i1130; - for (_i1130 = 0; _i1130 < _size1126; ++_i1130) + uint32_t _size1134; + ::apache::thrift::protocol::TType _etype1137; + xfer += iprot->readListBegin(_etype1137, _size1134); + this->part_vals.resize(_size1134); + uint32_t _i1138; + for (_i1138 = 0; _i1138 < _size1134; ++_i1138) { - xfer += iprot->readString(this->part_vals[_i1130]); + xfer += iprot->readString(this->part_vals[_i1138]); } xfer += iprot->readListEnd(); } @@ -12703,10 +12703,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1131; - for (_iter1131 = this->part_vals.begin(); _iter1131 != this->part_vals.end(); ++_iter1131) + std::vector ::const_iterator _iter1139; + for (_iter1139 = this->part_vals.begin(); _iter1139 != this->part_vals.end(); ++_iter1139) { - xfer += oprot->writeString((*_iter1131)); + xfer += oprot->writeString((*_iter1139)); } xfer += oprot->writeListEnd(); } @@ -12746,10 +12746,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1132; - for (_iter1132 = (*(this->part_vals)).begin(); _iter1132 != (*(this->part_vals)).end(); ++_iter1132) + std::vector ::const_iterator _iter1140; + for (_iter1140 = (*(this->part_vals)).begin(); _iter1140 != (*(this->part_vals)).end(); ++_iter1140) { - xfer += oprot->writeString((*_iter1132)); + xfer += oprot->writeString((*_iter1140)); } xfer += oprot->writeListEnd(); } @@ -13755,14 +13755,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1133; - ::apache::thrift::protocol::TType _etype1136; - xfer += iprot->readListBegin(_etype1136, _size1133); - this->part_vals.resize(_size1133); - uint32_t _i1137; - for (_i1137 = 0; _i1137 < _size1133; ++_i1137) + uint32_t _size1141; + ::apache::thrift::protocol::TType _etype1144; + xfer += iprot->readListBegin(_etype1144, _size1141); + this->part_vals.resize(_size1141); + uint32_t _i1145; + for (_i1145 = 0; _i1145 < _size1141; ++_i1145) { - xfer += iprot->readString(this->part_vals[_i1137]); + xfer += iprot->readString(this->part_vals[_i1145]); } xfer += iprot->readListEnd(); } @@ -13799,10 +13799,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1138; - for (_iter1138 = this->part_vals.begin(); _iter1138 != this->part_vals.end(); ++_iter1138) + std::vector ::const_iterator _iter1146; + for (_iter1146 = this->part_vals.begin(); _iter1146 != this->part_vals.end(); ++_iter1146) { - xfer += oprot->writeString((*_iter1138)); + xfer += oprot->writeString((*_iter1146)); } xfer += oprot->writeListEnd(); 
} @@ -13834,10 +13834,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1139; - for (_iter1139 = (*(this->part_vals)).begin(); _iter1139 != (*(this->part_vals)).end(); ++_iter1139) + std::vector ::const_iterator _iter1147; + for (_iter1147 = (*(this->part_vals)).begin(); _iter1147 != (*(this->part_vals)).end(); ++_iter1147) { - xfer += oprot->writeString((*_iter1139)); + xfer += oprot->writeString((*_iter1147)); } xfer += oprot->writeListEnd(); } @@ -14026,17 +14026,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1140; - ::apache::thrift::protocol::TType _ktype1141; - ::apache::thrift::protocol::TType _vtype1142; - xfer += iprot->readMapBegin(_ktype1141, _vtype1142, _size1140); - uint32_t _i1144; - for (_i1144 = 0; _i1144 < _size1140; ++_i1144) + uint32_t _size1148; + ::apache::thrift::protocol::TType _ktype1149; + ::apache::thrift::protocol::TType _vtype1150; + xfer += iprot->readMapBegin(_ktype1149, _vtype1150, _size1148); + uint32_t _i1152; + for (_i1152 = 0; _i1152 < _size1148; ++_i1152) { - std::string _key1145; - xfer += iprot->readString(_key1145); - std::string& _val1146 = this->partitionSpecs[_key1145]; - xfer += iprot->readString(_val1146); + std::string _key1153; + xfer += iprot->readString(_key1153); + std::string& _val1154 = this->partitionSpecs[_key1153]; + xfer += iprot->readString(_val1154); } xfer += iprot->readMapEnd(); } @@ -14097,11 +14097,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += 
oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1147; - for (_iter1147 = this->partitionSpecs.begin(); _iter1147 != this->partitionSpecs.end(); ++_iter1147) + std::map ::const_iterator _iter1155; + for (_iter1155 = this->partitionSpecs.begin(); _iter1155 != this->partitionSpecs.end(); ++_iter1155) { - xfer += oprot->writeString(_iter1147->first); - xfer += oprot->writeString(_iter1147->second); + xfer += oprot->writeString(_iter1155->first); + xfer += oprot->writeString(_iter1155->second); } xfer += oprot->writeMapEnd(); } @@ -14141,11 +14141,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1148; - for (_iter1148 = (*(this->partitionSpecs)).begin(); _iter1148 != (*(this->partitionSpecs)).end(); ++_iter1148) + std::map ::const_iterator _iter1156; + for (_iter1156 = (*(this->partitionSpecs)).begin(); _iter1156 != (*(this->partitionSpecs)).end(); ++_iter1156) { - xfer += oprot->writeString(_iter1148->first); - xfer += oprot->writeString(_iter1148->second); + xfer += oprot->writeString(_iter1156->first); + xfer += oprot->writeString(_iter1156->second); } xfer += oprot->writeMapEnd(); } @@ -14390,17 +14390,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1149; - ::apache::thrift::protocol::TType _ktype1150; - ::apache::thrift::protocol::TType _vtype1151; - xfer += iprot->readMapBegin(_ktype1150, _vtype1151, _size1149); - uint32_t _i1153; - for (_i1153 = 0; _i1153 < _size1149; ++_i1153) + uint32_t 
_size1157; + ::apache::thrift::protocol::TType _ktype1158; + ::apache::thrift::protocol::TType _vtype1159; + xfer += iprot->readMapBegin(_ktype1158, _vtype1159, _size1157); + uint32_t _i1161; + for (_i1161 = 0; _i1161 < _size1157; ++_i1161) { - std::string _key1154; - xfer += iprot->readString(_key1154); - std::string& _val1155 = this->partitionSpecs[_key1154]; - xfer += iprot->readString(_val1155); + std::string _key1162; + xfer += iprot->readString(_key1162); + std::string& _val1163 = this->partitionSpecs[_key1162]; + xfer += iprot->readString(_val1163); } xfer += iprot->readMapEnd(); } @@ -14461,11 +14461,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1156; - for (_iter1156 = this->partitionSpecs.begin(); _iter1156 != this->partitionSpecs.end(); ++_iter1156) + std::map ::const_iterator _iter1164; + for (_iter1164 = this->partitionSpecs.begin(); _iter1164 != this->partitionSpecs.end(); ++_iter1164) { - xfer += oprot->writeString(_iter1156->first); - xfer += oprot->writeString(_iter1156->second); + xfer += oprot->writeString(_iter1164->first); + xfer += oprot->writeString(_iter1164->second); } xfer += oprot->writeMapEnd(); } @@ -14505,11 +14505,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift:: xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1157; - for (_iter1157 = (*(this->partitionSpecs)).begin(); _iter1157 != (*(this->partitionSpecs)).end(); ++_iter1157) + std::map ::const_iterator _iter1165; + 
for (_iter1165 = (*(this->partitionSpecs)).begin(); _iter1165 != (*(this->partitionSpecs)).end(); ++_iter1165) { - xfer += oprot->writeString(_iter1157->first); - xfer += oprot->writeString(_iter1157->second); + xfer += oprot->writeString(_iter1165->first); + xfer += oprot->writeString(_iter1165->second); } xfer += oprot->writeMapEnd(); } @@ -14566,14 +14566,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1158; - ::apache::thrift::protocol::TType _etype1161; - xfer += iprot->readListBegin(_etype1161, _size1158); - this->success.resize(_size1158); - uint32_t _i1162; - for (_i1162 = 0; _i1162 < _size1158; ++_i1162) + uint32_t _size1166; + ::apache::thrift::protocol::TType _etype1169; + xfer += iprot->readListBegin(_etype1169, _size1166); + this->success.resize(_size1166); + uint32_t _i1170; + for (_i1170 = 0; _i1170 < _size1166; ++_i1170) { - xfer += this->success[_i1162].read(iprot); + xfer += this->success[_i1170].read(iprot); } xfer += iprot->readListEnd(); } @@ -14636,10 +14636,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1163; - for (_iter1163 = this->success.begin(); _iter1163 != this->success.end(); ++_iter1163) + std::vector ::const_iterator _iter1171; + for (_iter1171 = this->success.begin(); _iter1171 != this->success.end(); ++_iter1171) { - xfer += (*_iter1163).write(oprot); + xfer += (*_iter1171).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14696,14 +14696,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1164; - 
::apache::thrift::protocol::TType _etype1167; - xfer += iprot->readListBegin(_etype1167, _size1164); - (*(this->success)).resize(_size1164); - uint32_t _i1168; - for (_i1168 = 0; _i1168 < _size1164; ++_i1168) + uint32_t _size1172; + ::apache::thrift::protocol::TType _etype1175; + xfer += iprot->readListBegin(_etype1175, _size1172); + (*(this->success)).resize(_size1172); + uint32_t _i1176; + for (_i1176 = 0; _i1176 < _size1172; ++_i1176) { - xfer += (*(this->success))[_i1168].read(iprot); + xfer += (*(this->success))[_i1176].read(iprot); } xfer += iprot->readListEnd(); } @@ -14802,14 +14802,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1169; - ::apache::thrift::protocol::TType _etype1172; - xfer += iprot->readListBegin(_etype1172, _size1169); - this->part_vals.resize(_size1169); - uint32_t _i1173; - for (_i1173 = 0; _i1173 < _size1169; ++_i1173) + uint32_t _size1177; + ::apache::thrift::protocol::TType _etype1180; + xfer += iprot->readListBegin(_etype1180, _size1177); + this->part_vals.resize(_size1177); + uint32_t _i1181; + for (_i1181 = 0; _i1181 < _size1177; ++_i1181) { - xfer += iprot->readString(this->part_vals[_i1173]); + xfer += iprot->readString(this->part_vals[_i1181]); } xfer += iprot->readListEnd(); } @@ -14830,14 +14830,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1174; - ::apache::thrift::protocol::TType _etype1177; - xfer += iprot->readListBegin(_etype1177, _size1174); - this->group_names.resize(_size1174); - uint32_t _i1178; - for (_i1178 = 0; _i1178 < _size1174; ++_i1178) + uint32_t _size1182; + ::apache::thrift::protocol::TType _etype1185; + xfer += iprot->readListBegin(_etype1185, _size1182); + this->group_names.resize(_size1182); + uint32_t _i1186; + for (_i1186 = 0; _i1186 < 
_size1182; ++_i1186) { - xfer += iprot->readString(this->group_names[_i1178]); + xfer += iprot->readString(this->group_names[_i1186]); } xfer += iprot->readListEnd(); } @@ -14874,10 +14874,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1179; - for (_iter1179 = this->part_vals.begin(); _iter1179 != this->part_vals.end(); ++_iter1179) + std::vector ::const_iterator _iter1187; + for (_iter1187 = this->part_vals.begin(); _iter1187 != this->part_vals.end(); ++_iter1187) { - xfer += oprot->writeString((*_iter1179)); + xfer += oprot->writeString((*_iter1187)); } xfer += oprot->writeListEnd(); } @@ -14890,10 +14890,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1180; - for (_iter1180 = this->group_names.begin(); _iter1180 != this->group_names.end(); ++_iter1180) + std::vector ::const_iterator _iter1188; + for (_iter1188 = this->group_names.begin(); _iter1188 != this->group_names.end(); ++_iter1188) { - xfer += oprot->writeString((*_iter1180)); + xfer += oprot->writeString((*_iter1188)); } xfer += oprot->writeListEnd(); } @@ -14925,10 +14925,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1181; - for (_iter1181 = (*(this->part_vals)).begin(); _iter1181 != 
(*(this->part_vals)).end(); ++_iter1181) + std::vector ::const_iterator _iter1189; + for (_iter1189 = (*(this->part_vals)).begin(); _iter1189 != (*(this->part_vals)).end(); ++_iter1189) { - xfer += oprot->writeString((*_iter1181)); + xfer += oprot->writeString((*_iter1189)); } xfer += oprot->writeListEnd(); } @@ -14941,10 +14941,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1182; - for (_iter1182 = (*(this->group_names)).begin(); _iter1182 != (*(this->group_names)).end(); ++_iter1182) + std::vector ::const_iterator _iter1190; + for (_iter1190 = (*(this->group_names)).begin(); _iter1190 != (*(this->group_names)).end(); ++_iter1190) { - xfer += oprot->writeString((*_iter1182)); + xfer += oprot->writeString((*_iter1190)); } xfer += oprot->writeListEnd(); } @@ -15503,14 +15503,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1183; - ::apache::thrift::protocol::TType _etype1186; - xfer += iprot->readListBegin(_etype1186, _size1183); - this->success.resize(_size1183); - uint32_t _i1187; - for (_i1187 = 0; _i1187 < _size1183; ++_i1187) + uint32_t _size1191; + ::apache::thrift::protocol::TType _etype1194; + xfer += iprot->readListBegin(_etype1194, _size1191); + this->success.resize(_size1191); + uint32_t _i1195; + for (_i1195 = 0; _i1195 < _size1191; ++_i1195) { - xfer += this->success[_i1187].read(iprot); + xfer += this->success[_i1195].read(iprot); } xfer += iprot->readListEnd(); } @@ -15557,10 +15557,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { 
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1188; - for (_iter1188 = this->success.begin(); _iter1188 != this->success.end(); ++_iter1188) + std::vector ::const_iterator _iter1196; + for (_iter1196 = this->success.begin(); _iter1196 != this->success.end(); ++_iter1196) { - xfer += (*_iter1188).write(oprot); + xfer += (*_iter1196).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15609,14 +15609,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1189; - ::apache::thrift::protocol::TType _etype1192; - xfer += iprot->readListBegin(_etype1192, _size1189); - (*(this->success)).resize(_size1189); - uint32_t _i1193; - for (_i1193 = 0; _i1193 < _size1189; ++_i1193) + uint32_t _size1197; + ::apache::thrift::protocol::TType _etype1200; + xfer += iprot->readListBegin(_etype1200, _size1197); + (*(this->success)).resize(_size1197); + uint32_t _i1201; + for (_i1201 = 0; _i1201 < _size1197; ++_i1201) { - xfer += (*(this->success))[_i1193].read(iprot); + xfer += (*(this->success))[_i1201].read(iprot); } xfer += iprot->readListEnd(); } @@ -15715,14 +15715,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1194; - ::apache::thrift::protocol::TType _etype1197; - xfer += iprot->readListBegin(_etype1197, _size1194); - this->group_names.resize(_size1194); - uint32_t _i1198; - for (_i1198 = 0; _i1198 < _size1194; ++_i1198) + uint32_t _size1202; + ::apache::thrift::protocol::TType _etype1205; + xfer += iprot->readListBegin(_etype1205, _size1202); + this->group_names.resize(_size1202); + uint32_t _i1206; + for (_i1206 = 0; _i1206 < _size1202; ++_i1206) { - xfer += iprot->readString(this->group_names[_i1198]); + xfer += 
iprot->readString(this->group_names[_i1206]); } xfer += iprot->readListEnd(); } @@ -15767,10 +15767,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1199; - for (_iter1199 = this->group_names.begin(); _iter1199 != this->group_names.end(); ++_iter1199) + std::vector ::const_iterator _iter1207; + for (_iter1207 = this->group_names.begin(); _iter1207 != this->group_names.end(); ++_iter1207) { - xfer += oprot->writeString((*_iter1199)); + xfer += oprot->writeString((*_iter1207)); } xfer += oprot->writeListEnd(); } @@ -15810,10 +15810,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1200; - for (_iter1200 = (*(this->group_names)).begin(); _iter1200 != (*(this->group_names)).end(); ++_iter1200) + std::vector ::const_iterator _iter1208; + for (_iter1208 = (*(this->group_names)).begin(); _iter1208 != (*(this->group_names)).end(); ++_iter1208) { - xfer += oprot->writeString((*_iter1200)); + xfer += oprot->writeString((*_iter1208)); } xfer += oprot->writeListEnd(); } @@ -15854,14 +15854,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1201; - ::apache::thrift::protocol::TType _etype1204; - xfer += iprot->readListBegin(_etype1204, _size1201); - this->success.resize(_size1201); - uint32_t _i1205; - for (_i1205 = 0; _i1205 < _size1201; ++_i1205) + uint32_t _size1209; + ::apache::thrift::protocol::TType _etype1212; + 
xfer += iprot->readListBegin(_etype1212, _size1209); + this->success.resize(_size1209); + uint32_t _i1213; + for (_i1213 = 0; _i1213 < _size1209; ++_i1213) { - xfer += this->success[_i1205].read(iprot); + xfer += this->success[_i1213].read(iprot); } xfer += iprot->readListEnd(); } @@ -15908,10 +15908,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1206; - for (_iter1206 = this->success.begin(); _iter1206 != this->success.end(); ++_iter1206) + std::vector ::const_iterator _iter1214; + for (_iter1214 = this->success.begin(); _iter1214 != this->success.end(); ++_iter1214) { - xfer += (*_iter1206).write(oprot); + xfer += (*_iter1214).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15960,14 +15960,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1207; - ::apache::thrift::protocol::TType _etype1210; - xfer += iprot->readListBegin(_etype1210, _size1207); - (*(this->success)).resize(_size1207); - uint32_t _i1211; - for (_i1211 = 0; _i1211 < _size1207; ++_i1211) + uint32_t _size1215; + ::apache::thrift::protocol::TType _etype1218; + xfer += iprot->readListBegin(_etype1218, _size1215); + (*(this->success)).resize(_size1215); + uint32_t _i1219; + for (_i1219 = 0; _i1219 < _size1215; ++_i1219) { - xfer += (*(this->success))[_i1211].read(iprot); + xfer += (*(this->success))[_i1219].read(iprot); } xfer += iprot->readListEnd(); } @@ -16145,14 +16145,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1212; - ::apache::thrift::protocol::TType _etype1215; - xfer 
+= iprot->readListBegin(_etype1215, _size1212); - this->success.resize(_size1212); - uint32_t _i1216; - for (_i1216 = 0; _i1216 < _size1212; ++_i1216) + uint32_t _size1220; + ::apache::thrift::protocol::TType _etype1223; + xfer += iprot->readListBegin(_etype1223, _size1220); + this->success.resize(_size1220); + uint32_t _i1224; + for (_i1224 = 0; _i1224 < _size1220; ++_i1224) { - xfer += this->success[_i1216].read(iprot); + xfer += this->success[_i1224].read(iprot); } xfer += iprot->readListEnd(); } @@ -16199,10 +16199,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1217; - for (_iter1217 = this->success.begin(); _iter1217 != this->success.end(); ++_iter1217) + std::vector ::const_iterator _iter1225; + for (_iter1225 = this->success.begin(); _iter1225 != this->success.end(); ++_iter1225) { - xfer += (*_iter1217).write(oprot); + xfer += (*_iter1225).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16251,14 +16251,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1218; - ::apache::thrift::protocol::TType _etype1221; - xfer += iprot->readListBegin(_etype1221, _size1218); - (*(this->success)).resize(_size1218); - uint32_t _i1222; - for (_i1222 = 0; _i1222 < _size1218; ++_i1222) + uint32_t _size1226; + ::apache::thrift::protocol::TType _etype1229; + xfer += iprot->readListBegin(_etype1229, _size1226); + (*(this->success)).resize(_size1226); + uint32_t _i1230; + for (_i1230 = 0; _i1230 < _size1226; ++_i1230) { - xfer += (*(this->success))[_i1222].read(iprot); + xfer += (*(this->success))[_i1230].read(iprot); } xfer += iprot->readListEnd(); } @@ -16436,14 +16436,14 @@ uint32_t 
ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1223; - ::apache::thrift::protocol::TType _etype1226; - xfer += iprot->readListBegin(_etype1226, _size1223); - this->success.resize(_size1223); - uint32_t _i1227; - for (_i1227 = 0; _i1227 < _size1223; ++_i1227) + uint32_t _size1231; + ::apache::thrift::protocol::TType _etype1234; + xfer += iprot->readListBegin(_etype1234, _size1231); + this->success.resize(_size1231); + uint32_t _i1235; + for (_i1235 = 0; _i1235 < _size1231; ++_i1235) { - xfer += iprot->readString(this->success[_i1227]); + xfer += iprot->readString(this->success[_i1235]); } xfer += iprot->readListEnd(); } @@ -16490,10 +16490,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1228; - for (_iter1228 = this->success.begin(); _iter1228 != this->success.end(); ++_iter1228) + std::vector ::const_iterator _iter1236; + for (_iter1236 = this->success.begin(); _iter1236 != this->success.end(); ++_iter1236) { - xfer += oprot->writeString((*_iter1228)); + xfer += oprot->writeString((*_iter1236)); } xfer += oprot->writeListEnd(); } @@ -16542,14 +16542,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1229; - ::apache::thrift::protocol::TType _etype1232; - xfer += iprot->readListBegin(_etype1232, _size1229); - (*(this->success)).resize(_size1229); - uint32_t _i1233; - for (_i1233 = 0; _i1233 < _size1229; ++_i1233) + uint32_t _size1237; + ::apache::thrift::protocol::TType _etype1240; + xfer += iprot->readListBegin(_etype1240, _size1237); + 
(*(this->success)).resize(_size1237); + uint32_t _i1241; + for (_i1241 = 0; _i1241 < _size1237; ++_i1241) { - xfer += iprot->readString((*(this->success))[_i1233]); + xfer += iprot->readString((*(this->success))[_i1241]); } xfer += iprot->readListEnd(); } @@ -16859,14 +16859,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1234; - ::apache::thrift::protocol::TType _etype1237; - xfer += iprot->readListBegin(_etype1237, _size1234); - this->part_vals.resize(_size1234); - uint32_t _i1238; - for (_i1238 = 0; _i1238 < _size1234; ++_i1238) + uint32_t _size1242; + ::apache::thrift::protocol::TType _etype1245; + xfer += iprot->readListBegin(_etype1245, _size1242); + this->part_vals.resize(_size1242); + uint32_t _i1246; + for (_i1246 = 0; _i1246 < _size1242; ++_i1246) { - xfer += iprot->readString(this->part_vals[_i1238]); + xfer += iprot->readString(this->part_vals[_i1246]); } xfer += iprot->readListEnd(); } @@ -16911,10 +16911,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1239; - for (_iter1239 = this->part_vals.begin(); _iter1239 != this->part_vals.end(); ++_iter1239) + std::vector ::const_iterator _iter1247; + for (_iter1247 = this->part_vals.begin(); _iter1247 != this->part_vals.end(); ++_iter1247) { - xfer += oprot->writeString((*_iter1239)); + xfer += oprot->writeString((*_iter1247)); } xfer += oprot->writeListEnd(); } @@ -16950,10 +16950,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1240; - for (_iter1240 = (*(this->part_vals)).begin(); _iter1240 != (*(this->part_vals)).end(); ++_iter1240) + std::vector ::const_iterator _iter1248; + for (_iter1248 = (*(this->part_vals)).begin(); _iter1248 != (*(this->part_vals)).end(); ++_iter1248) { - xfer += oprot->writeString((*_iter1240)); + xfer += oprot->writeString((*_iter1248)); } xfer += oprot->writeListEnd(); } @@ -16998,14 +16998,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1241; - ::apache::thrift::protocol::TType _etype1244; - xfer += iprot->readListBegin(_etype1244, _size1241); - this->success.resize(_size1241); - uint32_t _i1245; - for (_i1245 = 0; _i1245 < _size1241; ++_i1245) + uint32_t _size1249; + ::apache::thrift::protocol::TType _etype1252; + xfer += iprot->readListBegin(_etype1252, _size1249); + this->success.resize(_size1249); + uint32_t _i1253; + for (_i1253 = 0; _i1253 < _size1249; ++_i1253) { - xfer += this->success[_i1245].read(iprot); + xfer += this->success[_i1253].read(iprot); } xfer += iprot->readListEnd(); } @@ -17052,10 +17052,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1246; - for (_iter1246 = this->success.begin(); _iter1246 != this->success.end(); ++_iter1246) + std::vector ::const_iterator _iter1254; + for (_iter1254 = this->success.begin(); _iter1254 != this->success.end(); ++_iter1254) { - xfer += (*_iter1246).write(oprot); + xfer += (*_iter1254).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17104,14 +17104,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p 
if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1247; - ::apache::thrift::protocol::TType _etype1250; - xfer += iprot->readListBegin(_etype1250, _size1247); - (*(this->success)).resize(_size1247); - uint32_t _i1251; - for (_i1251 = 0; _i1251 < _size1247; ++_i1251) + uint32_t _size1255; + ::apache::thrift::protocol::TType _etype1258; + xfer += iprot->readListBegin(_etype1258, _size1255); + (*(this->success)).resize(_size1255); + uint32_t _i1259; + for (_i1259 = 0; _i1259 < _size1255; ++_i1259) { - xfer += (*(this->success))[_i1251].read(iprot); + xfer += (*(this->success))[_i1259].read(iprot); } xfer += iprot->readListEnd(); } @@ -17194,14 +17194,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1252; - ::apache::thrift::protocol::TType _etype1255; - xfer += iprot->readListBegin(_etype1255, _size1252); - this->part_vals.resize(_size1252); - uint32_t _i1256; - for (_i1256 = 0; _i1256 < _size1252; ++_i1256) + uint32_t _size1260; + ::apache::thrift::protocol::TType _etype1263; + xfer += iprot->readListBegin(_etype1263, _size1260); + this->part_vals.resize(_size1260); + uint32_t _i1264; + for (_i1264 = 0; _i1264 < _size1260; ++_i1264) { - xfer += iprot->readString(this->part_vals[_i1256]); + xfer += iprot->readString(this->part_vals[_i1264]); } xfer += iprot->readListEnd(); } @@ -17230,14 +17230,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1257; - ::apache::thrift::protocol::TType _etype1260; - xfer += iprot->readListBegin(_etype1260, _size1257); - this->group_names.resize(_size1257); - uint32_t _i1261; - for (_i1261 = 0; _i1261 < _size1257; ++_i1261) + uint32_t _size1265; + ::apache::thrift::protocol::TType _etype1268; + xfer += 
iprot->readListBegin(_etype1268, _size1265); + this->group_names.resize(_size1265); + uint32_t _i1269; + for (_i1269 = 0; _i1269 < _size1265; ++_i1269) { - xfer += iprot->readString(this->group_names[_i1261]); + xfer += iprot->readString(this->group_names[_i1269]); } xfer += iprot->readListEnd(); } @@ -17274,10 +17274,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1262; - for (_iter1262 = this->part_vals.begin(); _iter1262 != this->part_vals.end(); ++_iter1262) + std::vector ::const_iterator _iter1270; + for (_iter1270 = this->part_vals.begin(); _iter1270 != this->part_vals.end(); ++_iter1270) { - xfer += oprot->writeString((*_iter1262)); + xfer += oprot->writeString((*_iter1270)); } xfer += oprot->writeListEnd(); } @@ -17294,10 +17294,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1263; - for (_iter1263 = this->group_names.begin(); _iter1263 != this->group_names.end(); ++_iter1263) + std::vector ::const_iterator _iter1271; + for (_iter1271 = this->group_names.begin(); _iter1271 != this->group_names.end(); ++_iter1271) { - xfer += oprot->writeString((*_iter1263)); + xfer += oprot->writeString((*_iter1271)); } xfer += oprot->writeListEnd(); } @@ -17329,10 +17329,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1264; - for (_iter1264 = (*(this->part_vals)).begin(); _iter1264 != (*(this->part_vals)).end(); ++_iter1264) + std::vector ::const_iterator _iter1272; + for (_iter1272 = (*(this->part_vals)).begin(); _iter1272 != (*(this->part_vals)).end(); ++_iter1272) { - xfer += oprot->writeString((*_iter1264)); + xfer += oprot->writeString((*_iter1272)); } xfer += oprot->writeListEnd(); } @@ -17349,10 +17349,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1265; - for (_iter1265 = (*(this->group_names)).begin(); _iter1265 != (*(this->group_names)).end(); ++_iter1265) + std::vector ::const_iterator _iter1273; + for (_iter1273 = (*(this->group_names)).begin(); _iter1273 != (*(this->group_names)).end(); ++_iter1273) { - xfer += oprot->writeString((*_iter1265)); + xfer += oprot->writeString((*_iter1273)); } xfer += oprot->writeListEnd(); } @@ -17393,14 +17393,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1266; - ::apache::thrift::protocol::TType _etype1269; - xfer += iprot->readListBegin(_etype1269, _size1266); - this->success.resize(_size1266); - uint32_t _i1270; - for (_i1270 = 0; _i1270 < _size1266; ++_i1270) + uint32_t _size1274; + ::apache::thrift::protocol::TType _etype1277; + xfer += iprot->readListBegin(_etype1277, _size1274); + this->success.resize(_size1274); + uint32_t _i1278; + for (_i1278 = 0; _i1278 < _size1274; ++_i1278) { - xfer += this->success[_i1270].read(iprot); + xfer += this->success[_i1278].read(iprot); } xfer += iprot->readListEnd(); } @@ -17447,10 +17447,10 @@ uint32_t 
ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1271; - for (_iter1271 = this->success.begin(); _iter1271 != this->success.end(); ++_iter1271) + std::vector ::const_iterator _iter1279; + for (_iter1279 = this->success.begin(); _iter1279 != this->success.end(); ++_iter1279) { - xfer += (*_iter1271).write(oprot); + xfer += (*_iter1279).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17499,14 +17499,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1272; - ::apache::thrift::protocol::TType _etype1275; - xfer += iprot->readListBegin(_etype1275, _size1272); - (*(this->success)).resize(_size1272); - uint32_t _i1276; - for (_i1276 = 0; _i1276 < _size1272; ++_i1276) + uint32_t _size1280; + ::apache::thrift::protocol::TType _etype1283; + xfer += iprot->readListBegin(_etype1283, _size1280); + (*(this->success)).resize(_size1280); + uint32_t _i1284; + for (_i1284 = 0; _i1284 < _size1280; ++_i1284) { - xfer += (*(this->success))[_i1276].read(iprot); + xfer += (*(this->success))[_i1284].read(iprot); } xfer += iprot->readListEnd(); } @@ -17589,14 +17589,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1277; - ::apache::thrift::protocol::TType _etype1280; - xfer += iprot->readListBegin(_etype1280, _size1277); - this->part_vals.resize(_size1277); - uint32_t _i1281; - for (_i1281 = 0; _i1281 < _size1277; ++_i1281) + uint32_t _size1285; + ::apache::thrift::protocol::TType _etype1288; + xfer += iprot->readListBegin(_etype1288, _size1285); + this->part_vals.resize(_size1285); 
+ uint32_t _i1289; + for (_i1289 = 0; _i1289 < _size1285; ++_i1289) { - xfer += iprot->readString(this->part_vals[_i1281]); + xfer += iprot->readString(this->part_vals[_i1289]); } xfer += iprot->readListEnd(); } @@ -17641,10 +17641,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1282; - for (_iter1282 = this->part_vals.begin(); _iter1282 != this->part_vals.end(); ++_iter1282) + std::vector ::const_iterator _iter1290; + for (_iter1290 = this->part_vals.begin(); _iter1290 != this->part_vals.end(); ++_iter1290) { - xfer += oprot->writeString((*_iter1282)); + xfer += oprot->writeString((*_iter1290)); } xfer += oprot->writeListEnd(); } @@ -17680,10 +17680,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1283; - for (_iter1283 = (*(this->part_vals)).begin(); _iter1283 != (*(this->part_vals)).end(); ++_iter1283) + std::vector ::const_iterator _iter1291; + for (_iter1291 = (*(this->part_vals)).begin(); _iter1291 != (*(this->part_vals)).end(); ++_iter1291) { - xfer += oprot->writeString((*_iter1283)); + xfer += oprot->writeString((*_iter1291)); } xfer += oprot->writeListEnd(); } @@ -17728,14 +17728,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1284; - ::apache::thrift::protocol::TType _etype1287; - xfer += iprot->readListBegin(_etype1287, _size1284); - this->success.resize(_size1284); - uint32_t _i1288; - for 
(_i1288 = 0; _i1288 < _size1284; ++_i1288) + uint32_t _size1292; + ::apache::thrift::protocol::TType _etype1295; + xfer += iprot->readListBegin(_etype1295, _size1292); + this->success.resize(_size1292); + uint32_t _i1296; + for (_i1296 = 0; _i1296 < _size1292; ++_i1296) { - xfer += iprot->readString(this->success[_i1288]); + xfer += iprot->readString(this->success[_i1296]); } xfer += iprot->readListEnd(); } @@ -17782,10 +17782,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1289; - for (_iter1289 = this->success.begin(); _iter1289 != this->success.end(); ++_iter1289) + std::vector ::const_iterator _iter1297; + for (_iter1297 = this->success.begin(); _iter1297 != this->success.end(); ++_iter1297) { - xfer += oprot->writeString((*_iter1289)); + xfer += oprot->writeString((*_iter1297)); } xfer += oprot->writeListEnd(); } @@ -17834,14 +17834,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1290; - ::apache::thrift::protocol::TType _etype1293; - xfer += iprot->readListBegin(_etype1293, _size1290); - (*(this->success)).resize(_size1290); - uint32_t _i1294; - for (_i1294 = 0; _i1294 < _size1290; ++_i1294) + uint32_t _size1298; + ::apache::thrift::protocol::TType _etype1301; + xfer += iprot->readListBegin(_etype1301, _size1298); + (*(this->success)).resize(_size1298); + uint32_t _i1302; + for (_i1302 = 0; _i1302 < _size1298; ++_i1302) { - xfer += iprot->readString((*(this->success))[_i1294]); + xfer += iprot->readString((*(this->success))[_i1302]); } xfer += iprot->readListEnd(); } @@ -18035,14 +18035,14 @@ uint32_t 
ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1295; - ::apache::thrift::protocol::TType _etype1298; - xfer += iprot->readListBegin(_etype1298, _size1295); - this->success.resize(_size1295); - uint32_t _i1299; - for (_i1299 = 0; _i1299 < _size1295; ++_i1299) + uint32_t _size1303; + ::apache::thrift::protocol::TType _etype1306; + xfer += iprot->readListBegin(_etype1306, _size1303); + this->success.resize(_size1303); + uint32_t _i1307; + for (_i1307 = 0; _i1307 < _size1303; ++_i1307) { - xfer += this->success[_i1299].read(iprot); + xfer += this->success[_i1307].read(iprot); } xfer += iprot->readListEnd(); } @@ -18089,10 +18089,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1300; - for (_iter1300 = this->success.begin(); _iter1300 != this->success.end(); ++_iter1300) + std::vector ::const_iterator _iter1308; + for (_iter1308 = this->success.begin(); _iter1308 != this->success.end(); ++_iter1308) { - xfer += (*_iter1300).write(oprot); + xfer += (*_iter1308).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18141,14 +18141,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1301; - ::apache::thrift::protocol::TType _etype1304; - xfer += iprot->readListBegin(_etype1304, _size1301); - (*(this->success)).resize(_size1301); - uint32_t _i1305; - for (_i1305 = 0; _i1305 < _size1301; ++_i1305) + uint32_t _size1309; + ::apache::thrift::protocol::TType _etype1312; + xfer += iprot->readListBegin(_etype1312, _size1309); + (*(this->success)).resize(_size1309); + uint32_t 
_i1313; + for (_i1313 = 0; _i1313 < _size1309; ++_i1313) { - xfer += (*(this->success))[_i1305].read(iprot); + xfer += (*(this->success))[_i1313].read(iprot); } xfer += iprot->readListEnd(); } @@ -18342,14 +18342,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1306; - ::apache::thrift::protocol::TType _etype1309; - xfer += iprot->readListBegin(_etype1309, _size1306); - this->success.resize(_size1306); - uint32_t _i1310; - for (_i1310 = 0; _i1310 < _size1306; ++_i1310) + uint32_t _size1314; + ::apache::thrift::protocol::TType _etype1317; + xfer += iprot->readListBegin(_etype1317, _size1314); + this->success.resize(_size1314); + uint32_t _i1318; + for (_i1318 = 0; _i1318 < _size1314; ++_i1318) { - xfer += this->success[_i1310].read(iprot); + xfer += this->success[_i1318].read(iprot); } xfer += iprot->readListEnd(); } @@ -18396,10 +18396,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1311; - for (_iter1311 = this->success.begin(); _iter1311 != this->success.end(); ++_iter1311) + std::vector ::const_iterator _iter1319; + for (_iter1319 = this->success.begin(); _iter1319 != this->success.end(); ++_iter1319) { - xfer += (*_iter1311).write(oprot); + xfer += (*_iter1319).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18448,14 +18448,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1312; - ::apache::thrift::protocol::TType _etype1315; - xfer += iprot->readListBegin(_etype1315, _size1312); - (*(this->success)).resize(_size1312); - uint32_t _i1316; - 
for (_i1316 = 0; _i1316 < _size1312; ++_i1316) + uint32_t _size1320; + ::apache::thrift::protocol::TType _etype1323; + xfer += iprot->readListBegin(_etype1323, _size1320); + (*(this->success)).resize(_size1320); + uint32_t _i1324; + for (_i1324 = 0; _i1324 < _size1320; ++_i1324) { - xfer += (*(this->success))[_i1316].read(iprot); + xfer += (*(this->success))[_i1324].read(iprot); } xfer += iprot->readListEnd(); } @@ -19024,14 +19024,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size1317; - ::apache::thrift::protocol::TType _etype1320; - xfer += iprot->readListBegin(_etype1320, _size1317); - this->names.resize(_size1317); - uint32_t _i1321; - for (_i1321 = 0; _i1321 < _size1317; ++_i1321) + uint32_t _size1325; + ::apache::thrift::protocol::TType _etype1328; + xfer += iprot->readListBegin(_etype1328, _size1325); + this->names.resize(_size1325); + uint32_t _i1329; + for (_i1329 = 0; _i1329 < _size1325; ++_i1329) { - xfer += iprot->readString(this->names[_i1321]); + xfer += iprot->readString(this->names[_i1329]); } xfer += iprot->readListEnd(); } @@ -19068,10 +19068,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter1322; - for (_iter1322 = this->names.begin(); _iter1322 != this->names.end(); ++_iter1322) + std::vector ::const_iterator _iter1330; + for (_iter1330 = this->names.begin(); _iter1330 != this->names.end(); ++_iter1330) { - xfer += oprot->writeString((*_iter1322)); + xfer += oprot->writeString((*_iter1330)); } xfer += oprot->writeListEnd(); } @@ -19103,10 +19103,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += 
oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter1323; - for (_iter1323 = (*(this->names)).begin(); _iter1323 != (*(this->names)).end(); ++_iter1323) + std::vector ::const_iterator _iter1331; + for (_iter1331 = (*(this->names)).begin(); _iter1331 != (*(this->names)).end(); ++_iter1331) { - xfer += oprot->writeString((*_iter1323)); + xfer += oprot->writeString((*_iter1331)); } xfer += oprot->writeListEnd(); } @@ -19147,14 +19147,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1324; - ::apache::thrift::protocol::TType _etype1327; - xfer += iprot->readListBegin(_etype1327, _size1324); - this->success.resize(_size1324); - uint32_t _i1328; - for (_i1328 = 0; _i1328 < _size1324; ++_i1328) + uint32_t _size1332; + ::apache::thrift::protocol::TType _etype1335; + xfer += iprot->readListBegin(_etype1335, _size1332); + this->success.resize(_size1332); + uint32_t _i1336; + for (_i1336 = 0; _i1336 < _size1332; ++_i1336) { - xfer += this->success[_i1328].read(iprot); + xfer += this->success[_i1336].read(iprot); } xfer += iprot->readListEnd(); } @@ -19201,10 +19201,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1329; - for (_iter1329 = this->success.begin(); _iter1329 != this->success.end(); ++_iter1329) + std::vector ::const_iterator _iter1337; + for (_iter1337 = this->success.begin(); _iter1337 != this->success.end(); ++_iter1337) { - xfer += (*_iter1329).write(oprot); + xfer += (*_iter1337).write(oprot); } xfer += 
oprot->writeListEnd(); } @@ -19253,14 +19253,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1330; - ::apache::thrift::protocol::TType _etype1333; - xfer += iprot->readListBegin(_etype1333, _size1330); - (*(this->success)).resize(_size1330); - uint32_t _i1334; - for (_i1334 = 0; _i1334 < _size1330; ++_i1334) + uint32_t _size1338; + ::apache::thrift::protocol::TType _etype1341; + xfer += iprot->readListBegin(_etype1341, _size1338); + (*(this->success)).resize(_size1338); + uint32_t _i1342; + for (_i1342 = 0; _i1342 < _size1338; ++_i1342) { - xfer += (*(this->success))[_i1334].read(iprot); + xfer += (*(this->success))[_i1342].read(iprot); } xfer += iprot->readListEnd(); } @@ -19582,14 +19582,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1335; - ::apache::thrift::protocol::TType _etype1338; - xfer += iprot->readListBegin(_etype1338, _size1335); - this->new_parts.resize(_size1335); - uint32_t _i1339; - for (_i1339 = 0; _i1339 < _size1335; ++_i1339) + uint32_t _size1343; + ::apache::thrift::protocol::TType _etype1346; + xfer += iprot->readListBegin(_etype1346, _size1343); + this->new_parts.resize(_size1343); + uint32_t _i1347; + for (_i1347 = 0; _i1347 < _size1343; ++_i1347) { - xfer += this->new_parts[_i1339].read(iprot); + xfer += this->new_parts[_i1347].read(iprot); } xfer += iprot->readListEnd(); } @@ -19626,10 +19626,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1340; - for (_iter1340 = this->new_parts.begin(); _iter1340 != 
this->new_parts.end(); ++_iter1340) + std::vector ::const_iterator _iter1348; + for (_iter1348 = this->new_parts.begin(); _iter1348 != this->new_parts.end(); ++_iter1348) { - xfer += (*_iter1340).write(oprot); + xfer += (*_iter1348).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19661,10 +19661,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1341; - for (_iter1341 = (*(this->new_parts)).begin(); _iter1341 != (*(this->new_parts)).end(); ++_iter1341) + std::vector ::const_iterator _iter1349; + for (_iter1349 = (*(this->new_parts)).begin(); _iter1349 != (*(this->new_parts)).end(); ++_iter1349) { - xfer += (*_iter1341).write(oprot); + xfer += (*_iter1349).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19849,14 +19849,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1342; - ::apache::thrift::protocol::TType _etype1345; - xfer += iprot->readListBegin(_etype1345, _size1342); - this->new_parts.resize(_size1342); - uint32_t _i1346; - for (_i1346 = 0; _i1346 < _size1342; ++_i1346) + uint32_t _size1350; + ::apache::thrift::protocol::TType _etype1353; + xfer += iprot->readListBegin(_etype1353, _size1350); + this->new_parts.resize(_size1350); + uint32_t _i1354; + for (_i1354 = 0; _i1354 < _size1350; ++_i1354) { - xfer += this->new_parts[_i1346].read(iprot); + xfer += this->new_parts[_i1354].read(iprot); } xfer += iprot->readListEnd(); } @@ -19901,10 +19901,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::wri xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1347; - for (_iter1347 = this->new_parts.begin(); _iter1347 != this->new_parts.end(); ++_iter1347) + std::vector ::const_iterator _iter1355; + for (_iter1355 = this->new_parts.begin(); _iter1355 != this->new_parts.end(); ++_iter1355) { - xfer += (*_iter1347).write(oprot); + xfer += (*_iter1355).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19940,10 +19940,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1348; - for (_iter1348 = (*(this->new_parts)).begin(); _iter1348 != (*(this->new_parts)).end(); ++_iter1348) + std::vector ::const_iterator _iter1356; + for (_iter1356 = (*(this->new_parts)).begin(); _iter1356 != (*(this->new_parts)).end(); ++_iter1356) { - xfer += (*_iter1348).write(oprot); + xfer += (*_iter1356).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20387,14 +20387,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1349; - ::apache::thrift::protocol::TType _etype1352; - xfer += iprot->readListBegin(_etype1352, _size1349); - this->part_vals.resize(_size1349); - uint32_t _i1353; - for (_i1353 = 0; _i1353 < _size1349; ++_i1353) + uint32_t _size1357; + ::apache::thrift::protocol::TType _etype1360; + xfer += iprot->readListBegin(_etype1360, _size1357); + this->part_vals.resize(_size1357); + uint32_t _i1361; + for (_i1361 = 0; _i1361 < _size1357; ++_i1361) { - xfer += iprot->readString(this->part_vals[_i1353]); + xfer += iprot->readString(this->part_vals[_i1361]); } xfer += iprot->readListEnd(); } @@ -20439,10 +20439,10 @@ 
uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1354; - for (_iter1354 = this->part_vals.begin(); _iter1354 != this->part_vals.end(); ++_iter1354) + std::vector ::const_iterator _iter1362; + for (_iter1362 = this->part_vals.begin(); _iter1362 != this->part_vals.end(); ++_iter1362) { - xfer += oprot->writeString((*_iter1354)); + xfer += oprot->writeString((*_iter1362)); } xfer += oprot->writeListEnd(); } @@ -20478,10 +20478,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1355; - for (_iter1355 = (*(this->part_vals)).begin(); _iter1355 != (*(this->part_vals)).end(); ++_iter1355) + std::vector ::const_iterator _iter1363; + for (_iter1363 = (*(this->part_vals)).begin(); _iter1363 != (*(this->part_vals)).end(); ++_iter1363) { - xfer += oprot->writeString((*_iter1355)); + xfer += oprot->writeString((*_iter1363)); } xfer += oprot->writeListEnd(); } @@ -20654,14 +20654,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1356; - ::apache::thrift::protocol::TType _etype1359; - xfer += iprot->readListBegin(_etype1359, _size1356); - this->part_vals.resize(_size1356); - uint32_t _i1360; - for (_i1360 = 0; _i1360 < _size1356; ++_i1360) + uint32_t _size1364; + ::apache::thrift::protocol::TType _etype1367; + xfer += iprot->readListBegin(_etype1367, _size1364); + this->part_vals.resize(_size1364); + uint32_t _i1368; + for (_i1368 = 
0; _i1368 < _size1364; ++_i1368) { - xfer += iprot->readString(this->part_vals[_i1360]); + xfer += iprot->readString(this->part_vals[_i1368]); } xfer += iprot->readListEnd(); } @@ -20698,10 +20698,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1361; - for (_iter1361 = this->part_vals.begin(); _iter1361 != this->part_vals.end(); ++_iter1361) + std::vector ::const_iterator _iter1369; + for (_iter1369 = this->part_vals.begin(); _iter1369 != this->part_vals.end(); ++_iter1369) { - xfer += oprot->writeString((*_iter1361)); + xfer += oprot->writeString((*_iter1369)); } xfer += oprot->writeListEnd(); } @@ -20729,10 +20729,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1362; - for (_iter1362 = (*(this->part_vals)).begin(); _iter1362 != (*(this->part_vals)).end(); ++_iter1362) + std::vector ::const_iterator _iter1370; + for (_iter1370 = (*(this->part_vals)).begin(); _iter1370 != (*(this->part_vals)).end(); ++_iter1370) { - xfer += oprot->writeString((*_iter1362)); + xfer += oprot->writeString((*_iter1370)); } xfer += oprot->writeListEnd(); } @@ -21207,14 +21207,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1363; - ::apache::thrift::protocol::TType _etype1366; - xfer += iprot->readListBegin(_etype1366, _size1363); - this->success.resize(_size1363); - uint32_t _i1367; - for (_i1367 = 0; _i1367 < _size1363; 
++_i1367) + uint32_t _size1371; + ::apache::thrift::protocol::TType _etype1374; + xfer += iprot->readListBegin(_etype1374, _size1371); + this->success.resize(_size1371); + uint32_t _i1375; + for (_i1375 = 0; _i1375 < _size1371; ++_i1375) { - xfer += iprot->readString(this->success[_i1367]); + xfer += iprot->readString(this->success[_i1375]); } xfer += iprot->readListEnd(); } @@ -21253,10 +21253,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1368; - for (_iter1368 = this->success.begin(); _iter1368 != this->success.end(); ++_iter1368) + std::vector ::const_iterator _iter1376; + for (_iter1376 = this->success.begin(); _iter1376 != this->success.end(); ++_iter1376) { - xfer += oprot->writeString((*_iter1368)); + xfer += oprot->writeString((*_iter1376)); } xfer += oprot->writeListEnd(); } @@ -21301,14 +21301,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1369; - ::apache::thrift::protocol::TType _etype1372; - xfer += iprot->readListBegin(_etype1372, _size1369); - (*(this->success)).resize(_size1369); - uint32_t _i1373; - for (_i1373 = 0; _i1373 < _size1369; ++_i1373) + uint32_t _size1377; + ::apache::thrift::protocol::TType _etype1380; + xfer += iprot->readListBegin(_etype1380, _size1377); + (*(this->success)).resize(_size1377); + uint32_t _i1381; + for (_i1381 = 0; _i1381 < _size1377; ++_i1381) { - xfer += iprot->readString((*(this->success))[_i1373]); + xfer += iprot->readString((*(this->success))[_i1381]); } xfer += iprot->readListEnd(); } @@ -21446,17 +21446,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == 
::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1374; - ::apache::thrift::protocol::TType _ktype1375; - ::apache::thrift::protocol::TType _vtype1376; - xfer += iprot->readMapBegin(_ktype1375, _vtype1376, _size1374); - uint32_t _i1378; - for (_i1378 = 0; _i1378 < _size1374; ++_i1378) + uint32_t _size1382; + ::apache::thrift::protocol::TType _ktype1383; + ::apache::thrift::protocol::TType _vtype1384; + xfer += iprot->readMapBegin(_ktype1383, _vtype1384, _size1382); + uint32_t _i1386; + for (_i1386 = 0; _i1386 < _size1382; ++_i1386) { - std::string _key1379; - xfer += iprot->readString(_key1379); - std::string& _val1380 = this->success[_key1379]; - xfer += iprot->readString(_val1380); + std::string _key1387; + xfer += iprot->readString(_key1387); + std::string& _val1388 = this->success[_key1387]; + xfer += iprot->readString(_val1388); } xfer += iprot->readMapEnd(); } @@ -21495,11 +21495,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1381; - for (_iter1381 = this->success.begin(); _iter1381 != this->success.end(); ++_iter1381) + std::map ::const_iterator _iter1389; + for (_iter1389 = this->success.begin(); _iter1389 != this->success.end(); ++_iter1389) { - xfer += oprot->writeString(_iter1381->first); - xfer += oprot->writeString(_iter1381->second); + xfer += oprot->writeString(_iter1389->first); + xfer += oprot->writeString(_iter1389->second); } xfer += oprot->writeMapEnd(); } @@ -21544,17 +21544,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1382; - ::apache::thrift::protocol::TType _ktype1383; - 
::apache::thrift::protocol::TType _vtype1384; - xfer += iprot->readMapBegin(_ktype1383, _vtype1384, _size1382); - uint32_t _i1386; - for (_i1386 = 0; _i1386 < _size1382; ++_i1386) + uint32_t _size1390; + ::apache::thrift::protocol::TType _ktype1391; + ::apache::thrift::protocol::TType _vtype1392; + xfer += iprot->readMapBegin(_ktype1391, _vtype1392, _size1390); + uint32_t _i1394; + for (_i1394 = 0; _i1394 < _size1390; ++_i1394) { - std::string _key1387; - xfer += iprot->readString(_key1387); - std::string& _val1388 = (*(this->success))[_key1387]; - xfer += iprot->readString(_val1388); + std::string _key1395; + xfer += iprot->readString(_key1395); + std::string& _val1396 = (*(this->success))[_key1395]; + xfer += iprot->readString(_val1396); } xfer += iprot->readMapEnd(); } @@ -21629,17 +21629,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1389; - ::apache::thrift::protocol::TType _ktype1390; - ::apache::thrift::protocol::TType _vtype1391; - xfer += iprot->readMapBegin(_ktype1390, _vtype1391, _size1389); - uint32_t _i1393; - for (_i1393 = 0; _i1393 < _size1389; ++_i1393) + uint32_t _size1397; + ::apache::thrift::protocol::TType _ktype1398; + ::apache::thrift::protocol::TType _vtype1399; + xfer += iprot->readMapBegin(_ktype1398, _vtype1399, _size1397); + uint32_t _i1401; + for (_i1401 = 0; _i1401 < _size1397; ++_i1401) { - std::string _key1394; - xfer += iprot->readString(_key1394); - std::string& _val1395 = this->part_vals[_key1394]; - xfer += iprot->readString(_val1395); + std::string _key1402; + xfer += iprot->readString(_key1402); + std::string& _val1403 = this->part_vals[_key1402]; + xfer += iprot->readString(_val1403); } xfer += iprot->readMapEnd(); } @@ -21650,9 +21650,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t 
ecast1396; - xfer += iprot->readI32(ecast1396); - this->eventType = (PartitionEventType::type)ecast1396; + int32_t ecast1404; + xfer += iprot->readI32(ecast1404); + this->eventType = (PartitionEventType::type)ecast1404; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -21686,11 +21686,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1397; - for (_iter1397 = this->part_vals.begin(); _iter1397 != this->part_vals.end(); ++_iter1397) + std::map ::const_iterator _iter1405; + for (_iter1405 = this->part_vals.begin(); _iter1405 != this->part_vals.end(); ++_iter1405) { - xfer += oprot->writeString(_iter1397->first); - xfer += oprot->writeString(_iter1397->second); + xfer += oprot->writeString(_iter1405->first); + xfer += oprot->writeString(_iter1405->second); } xfer += oprot->writeMapEnd(); } @@ -21726,11 +21726,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1398; - for (_iter1398 = (*(this->part_vals)).begin(); _iter1398 != (*(this->part_vals)).end(); ++_iter1398) + std::map ::const_iterator _iter1406; + for (_iter1406 = (*(this->part_vals)).begin(); _iter1406 != (*(this->part_vals)).end(); ++_iter1406) { - xfer += oprot->writeString(_iter1398->first); - xfer += oprot->writeString(_iter1398->second); + xfer += oprot->writeString(_iter1406->first); + xfer += oprot->writeString(_iter1406->second); } xfer += oprot->writeMapEnd(); } @@ -21999,17 +21999,17 @@ 
uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1399; - ::apache::thrift::protocol::TType _ktype1400; - ::apache::thrift::protocol::TType _vtype1401; - xfer += iprot->readMapBegin(_ktype1400, _vtype1401, _size1399); - uint32_t _i1403; - for (_i1403 = 0; _i1403 < _size1399; ++_i1403) + uint32_t _size1407; + ::apache::thrift::protocol::TType _ktype1408; + ::apache::thrift::protocol::TType _vtype1409; + xfer += iprot->readMapBegin(_ktype1408, _vtype1409, _size1407); + uint32_t _i1411; + for (_i1411 = 0; _i1411 < _size1407; ++_i1411) { - std::string _key1404; - xfer += iprot->readString(_key1404); - std::string& _val1405 = this->part_vals[_key1404]; - xfer += iprot->readString(_val1405); + std::string _key1412; + xfer += iprot->readString(_key1412); + std::string& _val1413 = this->part_vals[_key1412]; + xfer += iprot->readString(_val1413); } xfer += iprot->readMapEnd(); } @@ -22020,9 +22020,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1406; - xfer += iprot->readI32(ecast1406); - this->eventType = (PartitionEventType::type)ecast1406; + int32_t ecast1414; + xfer += iprot->readI32(ecast1414); + this->eventType = (PartitionEventType::type)ecast1414; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -22056,11 +22056,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1407; - for (_iter1407 = this->part_vals.begin(); _iter1407 != this->part_vals.end(); ++_iter1407) + std::map ::const_iterator _iter1415; + for (_iter1415 
= this->part_vals.begin(); _iter1415 != this->part_vals.end(); ++_iter1415) { - xfer += oprot->writeString(_iter1407->first); - xfer += oprot->writeString(_iter1407->second); + xfer += oprot->writeString(_iter1415->first); + xfer += oprot->writeString(_iter1415->second); } xfer += oprot->writeMapEnd(); } @@ -22096,11 +22096,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1408; - for (_iter1408 = (*(this->part_vals)).begin(); _iter1408 != (*(this->part_vals)).end(); ++_iter1408) + std::map ::const_iterator _iter1416; + for (_iter1416 = (*(this->part_vals)).begin(); _iter1416 != (*(this->part_vals)).end(); ++_iter1416) { - xfer += oprot->writeString(_iter1408->first); - xfer += oprot->writeString(_iter1408->second); + xfer += oprot->writeString(_iter1416->first); + xfer += oprot->writeString(_iter1416->second); } xfer += oprot->writeMapEnd(); } @@ -23536,14 +23536,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1409; - ::apache::thrift::protocol::TType _etype1412; - xfer += iprot->readListBegin(_etype1412, _size1409); - this->success.resize(_size1409); - uint32_t _i1413; - for (_i1413 = 0; _i1413 < _size1409; ++_i1413) + uint32_t _size1417; + ::apache::thrift::protocol::TType _etype1420; + xfer += iprot->readListBegin(_etype1420, _size1417); + this->success.resize(_size1417); + uint32_t _i1421; + for (_i1421 = 0; _i1421 < _size1417; ++_i1421) { - xfer += this->success[_i1413].read(iprot); + xfer += this->success[_i1421].read(iprot); } xfer += iprot->readListEnd(); } @@ -23590,10 +23590,10 @@ uint32_t 
ThriftHiveMetastore_get_indexes_result::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1414; - for (_iter1414 = this->success.begin(); _iter1414 != this->success.end(); ++_iter1414) + std::vector ::const_iterator _iter1422; + for (_iter1422 = this->success.begin(); _iter1422 != this->success.end(); ++_iter1422) { - xfer += (*_iter1414).write(oprot); + xfer += (*_iter1422).write(oprot); } xfer += oprot->writeListEnd(); } @@ -23642,14 +23642,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1415; - ::apache::thrift::protocol::TType _etype1418; - xfer += iprot->readListBegin(_etype1418, _size1415); - (*(this->success)).resize(_size1415); - uint32_t _i1419; - for (_i1419 = 0; _i1419 < _size1415; ++_i1419) + uint32_t _size1423; + ::apache::thrift::protocol::TType _etype1426; + xfer += iprot->readListBegin(_etype1426, _size1423); + (*(this->success)).resize(_size1423); + uint32_t _i1427; + for (_i1427 = 0; _i1427 < _size1423; ++_i1427) { - xfer += (*(this->success))[_i1419].read(iprot); + xfer += (*(this->success))[_i1427].read(iprot); } xfer += iprot->readListEnd(); } @@ -23827,14 +23827,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1420; - ::apache::thrift::protocol::TType _etype1423; - xfer += iprot->readListBegin(_etype1423, _size1420); - this->success.resize(_size1420); - uint32_t _i1424; - for (_i1424 = 0; _i1424 < _size1420; ++_i1424) + uint32_t _size1428; + ::apache::thrift::protocol::TType _etype1431; + xfer += iprot->readListBegin(_etype1431, _size1428); + this->success.resize(_size1428); + 
uint32_t _i1432; + for (_i1432 = 0; _i1432 < _size1428; ++_i1432) { - xfer += iprot->readString(this->success[_i1424]); + xfer += iprot->readString(this->success[_i1432]); } xfer += iprot->readListEnd(); } @@ -23873,10 +23873,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1425; - for (_iter1425 = this->success.begin(); _iter1425 != this->success.end(); ++_iter1425) + std::vector ::const_iterator _iter1433; + for (_iter1433 = this->success.begin(); _iter1433 != this->success.end(); ++_iter1433) { - xfer += oprot->writeString((*_iter1425)); + xfer += oprot->writeString((*_iter1433)); } xfer += oprot->writeListEnd(); } @@ -23921,14 +23921,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1426; - ::apache::thrift::protocol::TType _etype1429; - xfer += iprot->readListBegin(_etype1429, _size1426); - (*(this->success)).resize(_size1426); - uint32_t _i1430; - for (_i1430 = 0; _i1430 < _size1426; ++_i1430) + uint32_t _size1434; + ::apache::thrift::protocol::TType _etype1437; + xfer += iprot->readListBegin(_etype1437, _size1434); + (*(this->success)).resize(_size1434); + uint32_t _i1438; + for (_i1438 = 0; _i1438 < _size1434; ++_i1438) { - xfer += iprot->readString((*(this->success))[_i1430]); + xfer += iprot->readString((*(this->success))[_i1438]); } xfer += iprot->readListEnd(); } @@ -28409,14 +28409,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1431; - ::apache::thrift::protocol::TType _etype1434; - xfer += iprot->readListBegin(_etype1434, _size1431); 
- this->success.resize(_size1431); - uint32_t _i1435; - for (_i1435 = 0; _i1435 < _size1431; ++_i1435) + uint32_t _size1439; + ::apache::thrift::protocol::TType _etype1442; + xfer += iprot->readListBegin(_etype1442, _size1439); + this->success.resize(_size1439); + uint32_t _i1443; + for (_i1443 = 0; _i1443 < _size1439; ++_i1443) { - xfer += iprot->readString(this->success[_i1435]); + xfer += iprot->readString(this->success[_i1443]); } xfer += iprot->readListEnd(); } @@ -28455,10 +28455,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1436; - for (_iter1436 = this->success.begin(); _iter1436 != this->success.end(); ++_iter1436) + std::vector ::const_iterator _iter1444; + for (_iter1444 = this->success.begin(); _iter1444 != this->success.end(); ++_iter1444) { - xfer += oprot->writeString((*_iter1436)); + xfer += oprot->writeString((*_iter1444)); } xfer += oprot->writeListEnd(); } @@ -28503,14 +28503,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1437; - ::apache::thrift::protocol::TType _etype1440; - xfer += iprot->readListBegin(_etype1440, _size1437); - (*(this->success)).resize(_size1437); - uint32_t _i1441; - for (_i1441 = 0; _i1441 < _size1437; ++_i1441) + uint32_t _size1445; + ::apache::thrift::protocol::TType _etype1448; + xfer += iprot->readListBegin(_etype1448, _size1445); + (*(this->success)).resize(_size1445); + uint32_t _i1449; + for (_i1449 = 0; _i1449 < _size1445; ++_i1449) { - xfer += iprot->readString((*(this->success))[_i1441]); + xfer += iprot->readString((*(this->success))[_i1449]); } xfer += iprot->readListEnd(); } @@ -29470,14 +29470,14 @@ uint32_t 
ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1442; - ::apache::thrift::protocol::TType _etype1445; - xfer += iprot->readListBegin(_etype1445, _size1442); - this->success.resize(_size1442); - uint32_t _i1446; - for (_i1446 = 0; _i1446 < _size1442; ++_i1446) + uint32_t _size1450; + ::apache::thrift::protocol::TType _etype1453; + xfer += iprot->readListBegin(_etype1453, _size1450); + this->success.resize(_size1450); + uint32_t _i1454; + for (_i1454 = 0; _i1454 < _size1450; ++_i1454) { - xfer += iprot->readString(this->success[_i1446]); + xfer += iprot->readString(this->success[_i1454]); } xfer += iprot->readListEnd(); } @@ -29516,10 +29516,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1447; - for (_iter1447 = this->success.begin(); _iter1447 != this->success.end(); ++_iter1447) + std::vector ::const_iterator _iter1455; + for (_iter1455 = this->success.begin(); _iter1455 != this->success.end(); ++_iter1455) { - xfer += oprot->writeString((*_iter1447)); + xfer += oprot->writeString((*_iter1455)); } xfer += oprot->writeListEnd(); } @@ -29564,14 +29564,14 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1448; - ::apache::thrift::protocol::TType _etype1451; - xfer += iprot->readListBegin(_etype1451, _size1448); - (*(this->success)).resize(_size1448); - uint32_t _i1452; - for (_i1452 = 0; _i1452 < _size1448; ++_i1452) + uint32_t _size1456; + ::apache::thrift::protocol::TType _etype1459; + xfer += iprot->readListBegin(_etype1459, _size1456); + 
(*(this->success)).resize(_size1456); + uint32_t _i1460; + for (_i1460 = 0; _i1460 < _size1456; ++_i1460) { - xfer += iprot->readString((*(this->success))[_i1452]); + xfer += iprot->readString((*(this->success))[_i1460]); } xfer += iprot->readListEnd(); } @@ -29644,9 +29644,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1453; - xfer += iprot->readI32(ecast1453); - this->principal_type = (PrincipalType::type)ecast1453; + int32_t ecast1461; + xfer += iprot->readI32(ecast1461); + this->principal_type = (PrincipalType::type)ecast1461; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -29662,9 +29662,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1454; - xfer += iprot->readI32(ecast1454); - this->grantorType = (PrincipalType::type)ecast1454; + int32_t ecast1462; + xfer += iprot->readI32(ecast1462); + this->grantorType = (PrincipalType::type)ecast1462; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -29935,9 +29935,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1455; - xfer += iprot->readI32(ecast1455); - this->principal_type = (PrincipalType::type)ecast1455; + int32_t ecast1463; + xfer += iprot->readI32(ecast1463); + this->principal_type = (PrincipalType::type)ecast1463; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30168,9 +30168,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1456; - xfer += iprot->readI32(ecast1456); - this->principal_type = (PrincipalType::type)ecast1456; + int32_t ecast1464; + xfer += 
iprot->readI32(ecast1464); + this->principal_type = (PrincipalType::type)ecast1464; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30259,14 +30259,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1457; - ::apache::thrift::protocol::TType _etype1460; - xfer += iprot->readListBegin(_etype1460, _size1457); - this->success.resize(_size1457); - uint32_t _i1461; - for (_i1461 = 0; _i1461 < _size1457; ++_i1461) + uint32_t _size1465; + ::apache::thrift::protocol::TType _etype1468; + xfer += iprot->readListBegin(_etype1468, _size1465); + this->success.resize(_size1465); + uint32_t _i1469; + for (_i1469 = 0; _i1469 < _size1465; ++_i1469) { - xfer += this->success[_i1461].read(iprot); + xfer += this->success[_i1469].read(iprot); } xfer += iprot->readListEnd(); } @@ -30305,10 +30305,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1462; - for (_iter1462 = this->success.begin(); _iter1462 != this->success.end(); ++_iter1462) + std::vector ::const_iterator _iter1470; + for (_iter1470 = this->success.begin(); _iter1470 != this->success.end(); ++_iter1470) { - xfer += (*_iter1462).write(oprot); + xfer += (*_iter1470).write(oprot); } xfer += oprot->writeListEnd(); } @@ -30353,14 +30353,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1463; - ::apache::thrift::protocol::TType _etype1466; - xfer += iprot->readListBegin(_etype1466, _size1463); - (*(this->success)).resize(_size1463); - uint32_t _i1467; - for (_i1467 = 0; _i1467 < 
_size1463; ++_i1467) + uint32_t _size1471; + ::apache::thrift::protocol::TType _etype1474; + xfer += iprot->readListBegin(_etype1474, _size1471); + (*(this->success)).resize(_size1471); + uint32_t _i1475; + for (_i1475 = 0; _i1475 < _size1471; ++_i1475) { - xfer += (*(this->success))[_i1467].read(iprot); + xfer += (*(this->success))[_i1475].read(iprot); } xfer += iprot->readListEnd(); } @@ -31056,14 +31056,14 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1468; - ::apache::thrift::protocol::TType _etype1471; - xfer += iprot->readListBegin(_etype1471, _size1468); - this->group_names.resize(_size1468); - uint32_t _i1472; - for (_i1472 = 0; _i1472 < _size1468; ++_i1472) + uint32_t _size1476; + ::apache::thrift::protocol::TType _etype1479; + xfer += iprot->readListBegin(_etype1479, _size1476); + this->group_names.resize(_size1476); + uint32_t _i1480; + for (_i1480 = 0; _i1480 < _size1476; ++_i1480) { - xfer += iprot->readString(this->group_names[_i1472]); + xfer += iprot->readString(this->group_names[_i1480]); } xfer += iprot->readListEnd(); } @@ -31100,10 +31100,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1473; - for (_iter1473 = this->group_names.begin(); _iter1473 != this->group_names.end(); ++_iter1473) + std::vector ::const_iterator _iter1481; + for (_iter1481 = this->group_names.begin(); _iter1481 != this->group_names.end(); ++_iter1481) { - xfer += oprot->writeString((*_iter1473)); + xfer += oprot->writeString((*_iter1481)); } xfer += oprot->writeListEnd(); } @@ -31135,10 +31135,10 @@ uint32_t 
ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1474; - for (_iter1474 = (*(this->group_names)).begin(); _iter1474 != (*(this->group_names)).end(); ++_iter1474) + std::vector ::const_iterator _iter1482; + for (_iter1482 = (*(this->group_names)).begin(); _iter1482 != (*(this->group_names)).end(); ++_iter1482) { - xfer += oprot->writeString((*_iter1474)); + xfer += oprot->writeString((*_iter1482)); } xfer += oprot->writeListEnd(); } @@ -31313,9 +31313,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1475; - xfer += iprot->readI32(ecast1475); - this->principal_type = (PrincipalType::type)ecast1475; + int32_t ecast1483; + xfer += iprot->readI32(ecast1483); + this->principal_type = (PrincipalType::type)ecast1483; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -31420,14 +31420,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1476; - ::apache::thrift::protocol::TType _etype1479; - xfer += iprot->readListBegin(_etype1479, _size1476); - this->success.resize(_size1476); - uint32_t _i1480; - for (_i1480 = 0; _i1480 < _size1476; ++_i1480) + uint32_t _size1484; + ::apache::thrift::protocol::TType _etype1487; + xfer += iprot->readListBegin(_etype1487, _size1484); + this->success.resize(_size1484); + uint32_t _i1488; + for (_i1488 = 0; _i1488 < _size1484; ++_i1488) { - xfer += this->success[_i1480].read(iprot); + xfer += this->success[_i1488].read(iprot); } xfer += iprot->readListEnd(); } @@ -31466,10 +31466,10 @@ uint32_t 
ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1481; - for (_iter1481 = this->success.begin(); _iter1481 != this->success.end(); ++_iter1481) + std::vector ::const_iterator _iter1489; + for (_iter1489 = this->success.begin(); _iter1489 != this->success.end(); ++_iter1489) { - xfer += (*_iter1481).write(oprot); + xfer += (*_iter1489).write(oprot); } xfer += oprot->writeListEnd(); } @@ -31514,14 +31514,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1482; - ::apache::thrift::protocol::TType _etype1485; - xfer += iprot->readListBegin(_etype1485, _size1482); - (*(this->success)).resize(_size1482); - uint32_t _i1486; - for (_i1486 = 0; _i1486 < _size1482; ++_i1486) + uint32_t _size1490; + ::apache::thrift::protocol::TType _etype1493; + xfer += iprot->readListBegin(_etype1493, _size1490); + (*(this->success)).resize(_size1490); + uint32_t _i1494; + for (_i1494 = 0; _i1494 < _size1490; ++_i1494) { - xfer += (*(this->success))[_i1486].read(iprot); + xfer += (*(this->success))[_i1494].read(iprot); } xfer += iprot->readListEnd(); } @@ -32209,14 +32209,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1487; - ::apache::thrift::protocol::TType _etype1490; - xfer += iprot->readListBegin(_etype1490, _size1487); - this->group_names.resize(_size1487); - uint32_t _i1491; - for (_i1491 = 0; _i1491 < _size1487; ++_i1491) + uint32_t _size1495; + ::apache::thrift::protocol::TType _etype1498; + xfer += iprot->readListBegin(_etype1498, _size1495); + 
this->group_names.resize(_size1495); + uint32_t _i1499; + for (_i1499 = 0; _i1499 < _size1495; ++_i1499) { - xfer += iprot->readString(this->group_names[_i1491]); + xfer += iprot->readString(this->group_names[_i1499]); } xfer += iprot->readListEnd(); } @@ -32249,10 +32249,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1492; - for (_iter1492 = this->group_names.begin(); _iter1492 != this->group_names.end(); ++_iter1492) + std::vector ::const_iterator _iter1500; + for (_iter1500 = this->group_names.begin(); _iter1500 != this->group_names.end(); ++_iter1500) { - xfer += oprot->writeString((*_iter1492)); + xfer += oprot->writeString((*_iter1500)); } xfer += oprot->writeListEnd(); } @@ -32280,10 +32280,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1493; - for (_iter1493 = (*(this->group_names)).begin(); _iter1493 != (*(this->group_names)).end(); ++_iter1493) + std::vector ::const_iterator _iter1501; + for (_iter1501 = (*(this->group_names)).begin(); _iter1501 != (*(this->group_names)).end(); ++_iter1501) { - xfer += oprot->writeString((*_iter1493)); + xfer += oprot->writeString((*_iter1501)); } xfer += oprot->writeListEnd(); } @@ -32324,14 +32324,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1494; - ::apache::thrift::protocol::TType _etype1497; - xfer += iprot->readListBegin(_etype1497, _size1494); - 
this->success.resize(_size1494); - uint32_t _i1498; - for (_i1498 = 0; _i1498 < _size1494; ++_i1498) + uint32_t _size1502; + ::apache::thrift::protocol::TType _etype1505; + xfer += iprot->readListBegin(_etype1505, _size1502); + this->success.resize(_size1502); + uint32_t _i1506; + for (_i1506 = 0; _i1506 < _size1502; ++_i1506) { - xfer += iprot->readString(this->success[_i1498]); + xfer += iprot->readString(this->success[_i1506]); } xfer += iprot->readListEnd(); } @@ -32370,10 +32370,10 @@ uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1499; - for (_iter1499 = this->success.begin(); _iter1499 != this->success.end(); ++_iter1499) + std::vector ::const_iterator _iter1507; + for (_iter1507 = this->success.begin(); _iter1507 != this->success.end(); ++_iter1507) { - xfer += oprot->writeString((*_iter1499)); + xfer += oprot->writeString((*_iter1507)); } xfer += oprot->writeListEnd(); } @@ -32418,14 +32418,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1500; - ::apache::thrift::protocol::TType _etype1503; - xfer += iprot->readListBegin(_etype1503, _size1500); - (*(this->success)).resize(_size1500); - uint32_t _i1504; - for (_i1504 = 0; _i1504 < _size1500; ++_i1504) + uint32_t _size1508; + ::apache::thrift::protocol::TType _etype1511; + xfer += iprot->readListBegin(_etype1511, _size1508); + (*(this->success)).resize(_size1508); + uint32_t _i1512; + for (_i1512 = 0; _i1512 < _size1508; ++_i1512) { - xfer += iprot->readString((*(this->success))[_i1504]); + xfer += iprot->readString((*(this->success))[_i1512]); } xfer += iprot->readListEnd(); } @@ -33736,14 +33736,14 @@ uint32_t 
ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1505; - ::apache::thrift::protocol::TType _etype1508; - xfer += iprot->readListBegin(_etype1508, _size1505); - this->success.resize(_size1505); - uint32_t _i1509; - for (_i1509 = 0; _i1509 < _size1505; ++_i1509) + uint32_t _size1513; + ::apache::thrift::protocol::TType _etype1516; + xfer += iprot->readListBegin(_etype1516, _size1513); + this->success.resize(_size1513); + uint32_t _i1517; + for (_i1517 = 0; _i1517 < _size1513; ++_i1517) { - xfer += iprot->readString(this->success[_i1509]); + xfer += iprot->readString(this->success[_i1517]); } xfer += iprot->readListEnd(); } @@ -33774,10 +33774,10 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1510; - for (_iter1510 = this->success.begin(); _iter1510 != this->success.end(); ++_iter1510) + std::vector ::const_iterator _iter1518; + for (_iter1518 = this->success.begin(); _iter1518 != this->success.end(); ++_iter1518) { - xfer += oprot->writeString((*_iter1510)); + xfer += oprot->writeString((*_iter1518)); } xfer += oprot->writeListEnd(); } @@ -33818,14 +33818,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1511; - ::apache::thrift::protocol::TType _etype1514; - xfer += iprot->readListBegin(_etype1514, _size1511); - (*(this->success)).resize(_size1511); - uint32_t _i1515; - for (_i1515 = 0; _i1515 < _size1511; ++_i1515) + uint32_t _size1519; + ::apache::thrift::protocol::TType _etype1522; + xfer += iprot->readListBegin(_etype1522, _size1519); + 
(*(this->success)).resize(_size1519); + uint32_t _i1523; + for (_i1523 = 0; _i1523 < _size1519; ++_i1523) { - xfer += iprot->readString((*(this->success))[_i1515]); + xfer += iprot->readString((*(this->success))[_i1523]); } xfer += iprot->readListEnd(); } @@ -34551,14 +34551,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1516; - ::apache::thrift::protocol::TType _etype1519; - xfer += iprot->readListBegin(_etype1519, _size1516); - this->success.resize(_size1516); - uint32_t _i1520; - for (_i1520 = 0; _i1520 < _size1516; ++_i1520) + uint32_t _size1524; + ::apache::thrift::protocol::TType _etype1527; + xfer += iprot->readListBegin(_etype1527, _size1524); + this->success.resize(_size1524); + uint32_t _i1528; + for (_i1528 = 0; _i1528 < _size1524; ++_i1528) { - xfer += iprot->readString(this->success[_i1520]); + xfer += iprot->readString(this->success[_i1528]); } xfer += iprot->readListEnd(); } @@ -34589,10 +34589,10 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1521; - for (_iter1521 = this->success.begin(); _iter1521 != this->success.end(); ++_iter1521) + std::vector ::const_iterator _iter1529; + for (_iter1529 = this->success.begin(); _iter1529 != this->success.end(); ++_iter1529) { - xfer += oprot->writeString((*_iter1521)); + xfer += oprot->writeString((*_iter1529)); } xfer += oprot->writeListEnd(); } @@ -34633,14 +34633,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1522; - ::apache::thrift::protocol::TType _etype1525; - xfer += 
iprot->readListBegin(_etype1525, _size1522); - (*(this->success)).resize(_size1522); - uint32_t _i1526; - for (_i1526 = 0; _i1526 < _size1522; ++_i1526) + uint32_t _size1530; + ::apache::thrift::protocol::TType _etype1533; + xfer += iprot->readListBegin(_etype1533, _size1530); + (*(this->success)).resize(_size1530); + uint32_t _i1534; + for (_i1534 = 0; _i1534 < _size1530; ++_i1534) { - xfer += iprot->readString((*(this->success))[_i1526]); + xfer += iprot->readString((*(this->success))[_i1534]); } xfer += iprot->readListEnd(); } diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index b3c9798086..79da92ef4b 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -20760,6 +20760,674 @@ void TableMeta::printTo(std::ostream& out) const { } +WMResourcePlan::~WMResourcePlan() throw() { +} + + +void WMResourcePlan::__set_name(const std::string& val) { + this->name = val; +} + +void WMResourcePlan::__set_status(const std::string& val) { + this->status = val; +__isset.status = true; +} + +void WMResourcePlan::__set_queryParallelism(const int32_t val) { + this->queryParallelism = val; +__isset.queryParallelism = true; +} + +uint32_t WMResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_name = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->name); + isset_name = true; + } 
else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->status); + this->__isset.status = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->queryParallelism); + this->__isset.queryParallelism = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_name) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t WMResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("WMResourcePlan"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.status) { + xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->status); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.queryParallelism) { + xfer += oprot->writeFieldBegin("queryParallelism", ::apache::thrift::protocol::T_I32, 3); + xfer += oprot->writeI32(this->queryParallelism); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(WMResourcePlan &a, WMResourcePlan &b) { + using ::std::swap; + swap(a.name, b.name); + swap(a.status, b.status); + swap(a.queryParallelism, b.queryParallelism); + swap(a.__isset, b.__isset); +} + +WMResourcePlan::WMResourcePlan(const WMResourcePlan& other863) { + name = other863.name; + status = other863.status; + queryParallelism = other863.queryParallelism; + __isset = other863.__isset; +} 
+WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other864) { + name = other864.name; + status = other864.status; + queryParallelism = other864.queryParallelism; + __isset = other864.__isset; + return *this; +} +void WMResourcePlan::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "WMResourcePlan("; + out << "name=" << to_string(name); + out << ", " << "status="; (__isset.status ? (out << to_string(status)) : (out << "")); + out << ", " << "queryParallelism="; (__isset.queryParallelism ? (out << to_string(queryParallelism)) : (out << "")); + out << ")"; +} + + +WMPool::~WMPool() throw() { +} + + +void WMPool::__set_resourcePlanName(const std::string& val) { + this->resourcePlanName = val; +} + +void WMPool::__set_poolName(const std::string& val) { + this->poolName = val; +} + +void WMPool::__set_parentPoolName(const std::string& val) { + this->parentPoolName = val; +__isset.parentPoolName = true; +} + +void WMPool::__set_allocFraction(const double val) { + this->allocFraction = val; +__isset.allocFraction = true; +} + +void WMPool::__set_queryParallelism(const int32_t val) { + this->queryParallelism = val; +__isset.queryParallelism = true; +} + +void WMPool::__set_schedulingPolicy(const std::string& val) { + this->schedulingPolicy = val; +__isset.schedulingPolicy = true; +} + +uint32_t WMPool::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_resourcePlanName = false; + bool isset_poolName = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += 
iprot->readString(this->resourcePlanName); + isset_resourcePlanName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->poolName); + isset_poolName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->parentPoolName); + this->__isset.parentPoolName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_DOUBLE) { + xfer += iprot->readDouble(this->allocFraction); + this->__isset.allocFraction = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->queryParallelism); + this->__isset.queryParallelism = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->schedulingPolicy); + this->__isset.schedulingPolicy = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_resourcePlanName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_poolName) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t WMPool::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("WMPool"); + + xfer += oprot->writeFieldBegin("resourcePlanName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->resourcePlanName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("poolName", ::apache::thrift::protocol::T_STRING, 2); + xfer += 
oprot->writeString(this->poolName); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.parentPoolName) { + xfer += oprot->writeFieldBegin("parentPoolName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->parentPoolName); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.allocFraction) { + xfer += oprot->writeFieldBegin("allocFraction", ::apache::thrift::protocol::T_DOUBLE, 4); + xfer += oprot->writeDouble(this->allocFraction); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.queryParallelism) { + xfer += oprot->writeFieldBegin("queryParallelism", ::apache::thrift::protocol::T_I32, 5); + xfer += oprot->writeI32(this->queryParallelism); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.schedulingPolicy) { + xfer += oprot->writeFieldBegin("schedulingPolicy", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->schedulingPolicy); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(WMPool &a, WMPool &b) { + using ::std::swap; + swap(a.resourcePlanName, b.resourcePlanName); + swap(a.poolName, b.poolName); + swap(a.parentPoolName, b.parentPoolName); + swap(a.allocFraction, b.allocFraction); + swap(a.queryParallelism, b.queryParallelism); + swap(a.schedulingPolicy, b.schedulingPolicy); + swap(a.__isset, b.__isset); +} + +WMPool::WMPool(const WMPool& other865) { + resourcePlanName = other865.resourcePlanName; + poolName = other865.poolName; + parentPoolName = other865.parentPoolName; + allocFraction = other865.allocFraction; + queryParallelism = other865.queryParallelism; + schedulingPolicy = other865.schedulingPolicy; + __isset = other865.__isset; +} +WMPool& WMPool::operator=(const WMPool& other866) { + resourcePlanName = other866.resourcePlanName; + poolName = other866.poolName; + parentPoolName = other866.parentPoolName; + allocFraction = other866.allocFraction; + queryParallelism = 
other866.queryParallelism; + schedulingPolicy = other866.schedulingPolicy; + __isset = other866.__isset; + return *this; +} +void WMPool::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "WMPool("; + out << "resourcePlanName=" << to_string(resourcePlanName); + out << ", " << "poolName=" << to_string(poolName); + out << ", " << "parentPoolName="; (__isset.parentPoolName ? (out << to_string(parentPoolName)) : (out << "")); + out << ", " << "allocFraction="; (__isset.allocFraction ? (out << to_string(allocFraction)) : (out << "")); + out << ", " << "queryParallelism="; (__isset.queryParallelism ? (out << to_string(queryParallelism)) : (out << "")); + out << ", " << "schedulingPolicy="; (__isset.schedulingPolicy ? (out << to_string(schedulingPolicy)) : (out << "")); + out << ")"; +} + + +WMTrigger::~WMTrigger() throw() { +} + + +void WMTrigger::__set_resourcePlanName(const std::string& val) { + this->resourcePlanName = val; +} + +void WMTrigger::__set_poolName(const std::string& val) { + this->poolName = val; +} + +void WMTrigger::__set_triggerExpression(const std::string& val) { + this->triggerExpression = val; +__isset.triggerExpression = true; +} + +void WMTrigger::__set_actionExpression(const std::string& val) { + this->actionExpression = val; +__isset.actionExpression = true; +} + +uint32_t WMTrigger::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_resourcePlanName = false; + bool isset_poolName = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += 
iprot->readString(this->resourcePlanName); + isset_resourcePlanName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->poolName); + isset_poolName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->triggerExpression); + this->__isset.triggerExpression = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->actionExpression); + this->__isset.actionExpression = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_resourcePlanName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_poolName) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t WMTrigger::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("WMTrigger"); + + xfer += oprot->writeFieldBegin("resourcePlanName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->resourcePlanName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("poolName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->poolName); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.triggerExpression) { + xfer += oprot->writeFieldBegin("triggerExpression", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->triggerExpression); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.actionExpression) { + xfer += oprot->writeFieldBegin("actionExpression", 
::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->actionExpression); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(WMTrigger &a, WMTrigger &b) { + using ::std::swap; + swap(a.resourcePlanName, b.resourcePlanName); + swap(a.poolName, b.poolName); + swap(a.triggerExpression, b.triggerExpression); + swap(a.actionExpression, b.actionExpression); + swap(a.__isset, b.__isset); +} + +WMTrigger::WMTrigger(const WMTrigger& other867) { + resourcePlanName = other867.resourcePlanName; + poolName = other867.poolName; + triggerExpression = other867.triggerExpression; + actionExpression = other867.actionExpression; + __isset = other867.__isset; +} +WMTrigger& WMTrigger::operator=(const WMTrigger& other868) { + resourcePlanName = other868.resourcePlanName; + poolName = other868.poolName; + triggerExpression = other868.triggerExpression; + actionExpression = other868.actionExpression; + __isset = other868.__isset; + return *this; +} +void WMTrigger::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "WMTrigger("; + out << "resourcePlanName=" << to_string(resourcePlanName); + out << ", " << "poolName=" << to_string(poolName); + out << ", " << "triggerExpression="; (__isset.triggerExpression ? (out << to_string(triggerExpression)) : (out << "")); + out << ", " << "actionExpression="; (__isset.actionExpression ? 
(out << to_string(actionExpression)) : (out << "")); + out << ")"; +} + + +WMMapping::~WMMapping() throw() { +} + + +void WMMapping::__set_resourcePlanName(const std::string& val) { + this->resourcePlanName = val; +} + +void WMMapping::__set_entityType(const std::string& val) { + this->entityType = val; +} + +void WMMapping::__set_entityName(const std::string& val) { + this->entityName = val; +} + +void WMMapping::__set_poolName(const std::string& val) { + this->poolName = val; +__isset.poolName = true; +} + +void WMMapping::__set_ordering(const int32_t val) { + this->ordering = val; +__isset.ordering = true; +} + +uint32_t WMMapping::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_resourcePlanName = false; + bool isset_entityType = false; + bool isset_entityName = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->resourcePlanName); + isset_resourcePlanName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->entityType); + isset_entityType = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->entityName); + isset_entityName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->poolName); + this->__isset.poolName = true; + } else { + xfer += 
iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->ordering); + this->__isset.ordering = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_resourcePlanName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_entityType) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_entityName) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t WMMapping::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("WMMapping"); + + xfer += oprot->writeFieldBegin("resourcePlanName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->resourcePlanName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("entityType", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->entityType); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("entityName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->entityName); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.poolName) { + xfer += oprot->writeFieldBegin("poolName", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->poolName); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.ordering) { + xfer += oprot->writeFieldBegin("ordering", ::apache::thrift::protocol::T_I32, 5); + xfer += oprot->writeI32(this->ordering); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(WMMapping &a, WMMapping &b) { + using ::std::swap; + swap(a.resourcePlanName, b.resourcePlanName); + 
swap(a.entityType, b.entityType); + swap(a.entityName, b.entityName); + swap(a.poolName, b.poolName); + swap(a.ordering, b.ordering); + swap(a.__isset, b.__isset); +} + +WMMapping::WMMapping(const WMMapping& other869) { + resourcePlanName = other869.resourcePlanName; + entityType = other869.entityType; + entityName = other869.entityName; + poolName = other869.poolName; + ordering = other869.ordering; + __isset = other869.__isset; +} +WMMapping& WMMapping::operator=(const WMMapping& other870) { + resourcePlanName = other870.resourcePlanName; + entityType = other870.entityType; + entityName = other870.entityName; + poolName = other870.poolName; + ordering = other870.ordering; + __isset = other870.__isset; + return *this; +} +void WMMapping::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "WMMapping("; + out << "resourcePlanName=" << to_string(resourcePlanName); + out << ", " << "entityType=" << to_string(entityType); + out << ", " << "entityName=" << to_string(entityName); + out << ", " << "poolName="; (__isset.poolName ? (out << to_string(poolName)) : (out << "")); + out << ", " << "ordering="; (__isset.ordering ? 
(out << to_string(ordering)) : (out << "")); + out << ")"; +} + + MetaException::~MetaException() throw() { } @@ -20829,13 +21497,13 @@ void swap(MetaException &a, MetaException &b) { swap(a.__isset, b.__isset); } -MetaException::MetaException(const MetaException& other863) : TException() { - message = other863.message; - __isset = other863.__isset; +MetaException::MetaException(const MetaException& other871) : TException() { + message = other871.message; + __isset = other871.__isset; } -MetaException& MetaException::operator=(const MetaException& other864) { - message = other864.message; - __isset = other864.__isset; +MetaException& MetaException::operator=(const MetaException& other872) { + message = other872.message; + __isset = other872.__isset; return *this; } void MetaException::printTo(std::ostream& out) const { @@ -20926,13 +21594,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) { swap(a.__isset, b.__isset); } -UnknownTableException::UnknownTableException(const UnknownTableException& other865) : TException() { - message = other865.message; - __isset = other865.__isset; +UnknownTableException::UnknownTableException(const UnknownTableException& other873) : TException() { + message = other873.message; + __isset = other873.__isset; } -UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other866) { - message = other866.message; - __isset = other866.__isset; +UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other874) { + message = other874.message; + __isset = other874.__isset; return *this; } void UnknownTableException::printTo(std::ostream& out) const { @@ -21023,13 +21691,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) { swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other867) : TException() { - message = other867.message; - __isset = other867.__isset; +UnknownDBException::UnknownDBException(const 
UnknownDBException& other875) : TException() { + message = other875.message; + __isset = other875.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other868) { - message = other868.message; - __isset = other868.__isset; +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other876) { + message = other876.message; + __isset = other876.__isset; return *this; } void UnknownDBException::printTo(std::ostream& out) const { @@ -21120,13 +21788,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) { swap(a.__isset, b.__isset); } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other869) : TException() { - message = other869.message; - __isset = other869.__isset; +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other877) : TException() { + message = other877.message; + __isset = other877.__isset; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other870) { - message = other870.message; - __isset = other870.__isset; +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other878) { + message = other878.message; + __isset = other878.__isset; return *this; } void AlreadyExistsException::printTo(std::ostream& out) const { @@ -21217,13 +21885,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) { swap(a.__isset, b.__isset); } -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other871) : TException() { - message = other871.message; - __isset = other871.__isset; +InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other879) : TException() { + message = other879.message; + __isset = other879.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other872) { - message = other872.message; - __isset = other872.__isset; 
+InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other880) { + message = other880.message; + __isset = other880.__isset; return *this; } void InvalidPartitionException::printTo(std::ostream& out) const { @@ -21314,13 +21982,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other873) : TException() { - message = other873.message; - __isset = other873.__isset; +UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other881) : TException() { + message = other881.message; + __isset = other881.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other874) { - message = other874.message; - __isset = other874.__isset; +UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other882) { + message = other882.message; + __isset = other882.__isset; return *this; } void UnknownPartitionException::printTo(std::ostream& out) const { @@ -21411,13 +22079,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other875) : TException() { - message = other875.message; - __isset = other875.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other883) : TException() { + message = other883.message; + __isset = other883.__isset; } -InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other876) { - message = other876.message; - __isset = other876.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other884) { + message = other884.message; + __isset = other884.__isset; return *this; } void InvalidObjectException::printTo(std::ostream& 
out) const { @@ -21508,13 +22176,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other877) : TException() { - message = other877.message; - __isset = other877.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other885) : TException() { + message = other885.message; + __isset = other885.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other878) { - message = other878.message; - __isset = other878.__isset; +NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other886) { + message = other886.message; + __isset = other886.__isset; return *this; } void NoSuchObjectException::printTo(std::ostream& out) const { @@ -21605,13 +22273,13 @@ void swap(IndexAlreadyExistsException &a, IndexAlreadyExistsException &b) { swap(a.__isset, b.__isset); } -IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other879) : TException() { - message = other879.message; - __isset = other879.__isset; +IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other887) : TException() { + message = other887.message; + __isset = other887.__isset; } -IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other880) { - message = other880.message; - __isset = other880.__isset; +IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other888) { + message = other888.message; + __isset = other888.__isset; return *this; } void IndexAlreadyExistsException::printTo(std::ostream& out) const { @@ -21702,13 +22370,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& 
other881) : TException() { - message = other881.message; - __isset = other881.__isset; +InvalidOperationException::InvalidOperationException(const InvalidOperationException& other889) : TException() { + message = other889.message; + __isset = other889.__isset; } -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other882) { - message = other882.message; - __isset = other882.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other890) { + message = other890.message; + __isset = other890.__isset; return *this; } void InvalidOperationException::printTo(std::ostream& out) const { @@ -21799,13 +22467,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other883) : TException() { - message = other883.message; - __isset = other883.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other891) : TException() { + message = other891.message; + __isset = other891.__isset; } -ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other884) { - message = other884.message; - __isset = other884.__isset; +ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other892) { + message = other892.message; + __isset = other892.__isset; return *this; } void ConfigValSecurityException::printTo(std::ostream& out) const { @@ -21896,13 +22564,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other885) : TException() { - message = other885.message; - __isset = other885.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other893) : TException() { + message = 
other893.message; + __isset = other893.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other886) { - message = other886.message; - __isset = other886.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other894) { + message = other894.message; + __isset = other894.__isset; return *this; } void InvalidInputException::printTo(std::ostream& out) const { @@ -21993,13 +22661,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { swap(a.__isset, b.__isset); } -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other887) : TException() { - message = other887.message; - __isset = other887.__isset; +NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other895) : TException() { + message = other895.message; + __isset = other895.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other888) { - message = other888.message; - __isset = other888.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other896) { + message = other896.message; + __isset = other896.__isset; return *this; } void NoSuchTxnException::printTo(std::ostream& out) const { @@ -22090,13 +22758,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other889) : TException() { - message = other889.message; - __isset = other889.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other897) : TException() { + message = other897.message; + __isset = other897.__isset; } -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other890) { - message = other890.message; - __isset = other890.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other898) { + message = other898.message; + __isset = other898.__isset; return *this; } void 
TxnAbortedException::printTo(std::ostream& out) const { @@ -22187,13 +22855,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other891) : TException() { - message = other891.message; - __isset = other891.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other899) : TException() { + message = other899.message; + __isset = other899.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other892) { - message = other892.message; - __isset = other892.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other900) { + message = other900.message; + __isset = other900.__isset; return *this; } void TxnOpenException::printTo(std::ostream& out) const { @@ -22284,13 +22952,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other893) : TException() { - message = other893.message; - __isset = other893.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other901) : TException() { + message = other901.message; + __isset = other901.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other894) { - message = other894.message; - __isset = other894.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other902) { + message = other902.message; + __isset = other902.__isset; return *this; } void NoSuchLockException::printTo(std::ostream& out) const { diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index fd9014d208..9c883ca04b 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -438,6 +438,14 @@ class 
CmRecycleResponse; class TableMeta; +class WMResourcePlan; + +class WMPool; + +class WMTrigger; + +class WMMapping; + class MetaException; class UnknownTableException; @@ -8441,6 +8449,286 @@ inline std::ostream& operator<<(std::ostream& out, const TableMeta& obj) return out; } +typedef struct _WMResourcePlan__isset { + _WMResourcePlan__isset() : status(false), queryParallelism(false) {} + bool status :1; + bool queryParallelism :1; +} _WMResourcePlan__isset; + +class WMResourcePlan { + public: + + WMResourcePlan(const WMResourcePlan&); + WMResourcePlan& operator=(const WMResourcePlan&); + WMResourcePlan() : name(), status(), queryParallelism(0) { + } + + virtual ~WMResourcePlan() throw(); + std::string name; + std::string status; + int32_t queryParallelism; + + _WMResourcePlan__isset __isset; + + void __set_name(const std::string& val); + + void __set_status(const std::string& val); + + void __set_queryParallelism(const int32_t val); + + bool operator == (const WMResourcePlan & rhs) const + { + if (!(name == rhs.name)) + return false; + if (__isset.status != rhs.__isset.status) + return false; + else if (__isset.status && !(status == rhs.status)) + return false; + if (__isset.queryParallelism != rhs.__isset.queryParallelism) + return false; + else if (__isset.queryParallelism && !(queryParallelism == rhs.queryParallelism)) + return false; + return true; + } + bool operator != (const WMResourcePlan &rhs) const { + return !(*this == rhs); + } + + bool operator < (const WMResourcePlan & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(WMResourcePlan &a, WMResourcePlan &b); + +inline std::ostream& operator<<(std::ostream& out, const WMResourcePlan& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _WMPool__isset { + _WMPool__isset() : parentPoolName(false), allocFraction(false), 
queryParallelism(false), schedulingPolicy(false) {} + bool parentPoolName :1; + bool allocFraction :1; + bool queryParallelism :1; + bool schedulingPolicy :1; +} _WMPool__isset; + +class WMPool { + public: + + WMPool(const WMPool&); + WMPool& operator=(const WMPool&); + WMPool() : resourcePlanName(), poolName(), parentPoolName(), allocFraction(0), queryParallelism(0), schedulingPolicy() { + } + + virtual ~WMPool() throw(); + std::string resourcePlanName; + std::string poolName; + std::string parentPoolName; + double allocFraction; + int32_t queryParallelism; + std::string schedulingPolicy; + + _WMPool__isset __isset; + + void __set_resourcePlanName(const std::string& val); + + void __set_poolName(const std::string& val); + + void __set_parentPoolName(const std::string& val); + + void __set_allocFraction(const double val); + + void __set_queryParallelism(const int32_t val); + + void __set_schedulingPolicy(const std::string& val); + + bool operator == (const WMPool & rhs) const + { + if (!(resourcePlanName == rhs.resourcePlanName)) + return false; + if (!(poolName == rhs.poolName)) + return false; + if (__isset.parentPoolName != rhs.__isset.parentPoolName) + return false; + else if (__isset.parentPoolName && !(parentPoolName == rhs.parentPoolName)) + return false; + if (__isset.allocFraction != rhs.__isset.allocFraction) + return false; + else if (__isset.allocFraction && !(allocFraction == rhs.allocFraction)) + return false; + if (__isset.queryParallelism != rhs.__isset.queryParallelism) + return false; + else if (__isset.queryParallelism && !(queryParallelism == rhs.queryParallelism)) + return false; + if (__isset.schedulingPolicy != rhs.__isset.schedulingPolicy) + return false; + else if (__isset.schedulingPolicy && !(schedulingPolicy == rhs.schedulingPolicy)) + return false; + return true; + } + bool operator != (const WMPool &rhs) const { + return !(*this == rhs); + } + + bool operator < (const WMPool & ) const; + + uint32_t 
read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(WMPool &a, WMPool &b); + +inline std::ostream& operator<<(std::ostream& out, const WMPool& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _WMTrigger__isset { + _WMTrigger__isset() : triggerExpression(false), actionExpression(false) {} + bool triggerExpression :1; + bool actionExpression :1; +} _WMTrigger__isset; + +class WMTrigger { + public: + + WMTrigger(const WMTrigger&); + WMTrigger& operator=(const WMTrigger&); + WMTrigger() : resourcePlanName(), poolName(), triggerExpression(), actionExpression() { + } + + virtual ~WMTrigger() throw(); + std::string resourcePlanName; + std::string poolName; + std::string triggerExpression; + std::string actionExpression; + + _WMTrigger__isset __isset; + + void __set_resourcePlanName(const std::string& val); + + void __set_poolName(const std::string& val); + + void __set_triggerExpression(const std::string& val); + + void __set_actionExpression(const std::string& val); + + bool operator == (const WMTrigger & rhs) const + { + if (!(resourcePlanName == rhs.resourcePlanName)) + return false; + if (!(poolName == rhs.poolName)) + return false; + if (__isset.triggerExpression != rhs.__isset.triggerExpression) + return false; + else if (__isset.triggerExpression && !(triggerExpression == rhs.triggerExpression)) + return false; + if (__isset.actionExpression != rhs.__isset.actionExpression) + return false; + else if (__isset.actionExpression && !(actionExpression == rhs.actionExpression)) + return false; + return true; + } + bool operator != (const WMTrigger &rhs) const { + return !(*this == rhs); + } + + bool operator < (const WMTrigger & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; 
+}; + +void swap(WMTrigger &a, WMTrigger &b); + +inline std::ostream& operator<<(std::ostream& out, const WMTrigger& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _WMMapping__isset { + _WMMapping__isset() : poolName(false), ordering(false) {} + bool poolName :1; + bool ordering :1; +} _WMMapping__isset; + +class WMMapping { + public: + + WMMapping(const WMMapping&); + WMMapping& operator=(const WMMapping&); + WMMapping() : resourcePlanName(), entityType(), entityName(), poolName(), ordering(0) { + } + + virtual ~WMMapping() throw(); + std::string resourcePlanName; + std::string entityType; + std::string entityName; + std::string poolName; + int32_t ordering; + + _WMMapping__isset __isset; + + void __set_resourcePlanName(const std::string& val); + + void __set_entityType(const std::string& val); + + void __set_entityName(const std::string& val); + + void __set_poolName(const std::string& val); + + void __set_ordering(const int32_t val); + + bool operator == (const WMMapping & rhs) const + { + if (!(resourcePlanName == rhs.resourcePlanName)) + return false; + if (!(entityType == rhs.entityType)) + return false; + if (!(entityName == rhs.entityName)) + return false; + if (__isset.poolName != rhs.__isset.poolName) + return false; + else if (__isset.poolName && !(poolName == rhs.poolName)) + return false; + if (__isset.ordering != rhs.__isset.ordering) + return false; + else if (__isset.ordering && !(ordering == rhs.ordering)) + return false; + return true; + } + bool operator != (const WMMapping &rhs) const { + return !(*this == rhs); + } + + bool operator < (const WMMapping & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(WMMapping &a, WMMapping &b); + +inline std::ostream& operator<<(std::ostream& out, const WMMapping& obj) +{ + obj.printTo(out); + return out; +} + typedef struct 
_MetaException__isset { _MetaException__isset() : message(false) {} bool message :1; diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMMapping.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMMapping.java new file mode 100644 index 0000000000..cbf4f3867b --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMMapping.java @@ -0,0 +1,804 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class WMMapping implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMMapping"); + + private static final org.apache.thrift.protocol.TField 
RESOURCE_PLAN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("resourcePlanName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField ENTITY_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("entityType", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField ENTITY_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("entityName", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField POOL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("poolName", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField ORDERING_FIELD_DESC = new org.apache.thrift.protocol.TField("ordering", org.apache.thrift.protocol.TType.I32, (short)5); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new WMMappingStandardSchemeFactory()); + schemes.put(TupleScheme.class, new WMMappingTupleSchemeFactory()); + } + + private String resourcePlanName; // required + private String entityType; // required + private String entityName; // required + private String poolName; // optional + private int ordering; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RESOURCE_PLAN_NAME((short)1, "resourcePlanName"), + ENTITY_TYPE((short)2, "entityType"), + ENTITY_NAME((short)3, "entityName"), + POOL_NAME((short)4, "poolName"), + ORDERING((short)5, "ordering"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RESOURCE_PLAN_NAME + return RESOURCE_PLAN_NAME; + case 2: // ENTITY_TYPE + return ENTITY_TYPE; + case 3: // ENTITY_NAME + return ENTITY_NAME; + case 4: // POOL_NAME + return POOL_NAME; + case 5: // ORDERING + return ORDERING; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __ORDERING_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.POOL_NAME,_Fields.ORDERING}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RESOURCE_PLAN_NAME, new org.apache.thrift.meta_data.FieldMetaData("resourcePlanName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ENTITY_TYPE, new org.apache.thrift.meta_data.FieldMetaData("entityType", 
org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ENTITY_NAME, new org.apache.thrift.meta_data.FieldMetaData("entityName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.POOL_NAME, new org.apache.thrift.meta_data.FieldMetaData("poolName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ORDERING, new org.apache.thrift.meta_data.FieldMetaData("ordering", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMMapping.class, metaDataMap); + } + + public WMMapping() { + } + + public WMMapping( + String resourcePlanName, + String entityType, + String entityName) + { + this(); + this.resourcePlanName = resourcePlanName; + this.entityType = entityType; + this.entityName = entityName; + } + + /** + * Performs a deep copy on other. 
+ */ + public WMMapping(WMMapping other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetResourcePlanName()) { + this.resourcePlanName = other.resourcePlanName; + } + if (other.isSetEntityType()) { + this.entityType = other.entityType; + } + if (other.isSetEntityName()) { + this.entityName = other.entityName; + } + if (other.isSetPoolName()) { + this.poolName = other.poolName; + } + this.ordering = other.ordering; + } + + public WMMapping deepCopy() { + return new WMMapping(this); + } + + @Override + public void clear() { + this.resourcePlanName = null; + this.entityType = null; + this.entityName = null; + this.poolName = null; + setOrderingIsSet(false); + this.ordering = 0; + } + + public String getResourcePlanName() { + return this.resourcePlanName; + } + + public void setResourcePlanName(String resourcePlanName) { + this.resourcePlanName = resourcePlanName; + } + + public void unsetResourcePlanName() { + this.resourcePlanName = null; + } + + /** Returns true if field resourcePlanName is set (has been assigned a value) and false otherwise */ + public boolean isSetResourcePlanName() { + return this.resourcePlanName != null; + } + + public void setResourcePlanNameIsSet(boolean value) { + if (!value) { + this.resourcePlanName = null; + } + } + + public String getEntityType() { + return this.entityType; + } + + public void setEntityType(String entityType) { + this.entityType = entityType; + } + + public void unsetEntityType() { + this.entityType = null; + } + + /** Returns true if field entityType is set (has been assigned a value) and false otherwise */ + public boolean isSetEntityType() { + return this.entityType != null; + } + + public void setEntityTypeIsSet(boolean value) { + if (!value) { + this.entityType = null; + } + } + + public String getEntityName() { + return this.entityName; + } + + public void setEntityName(String entityName) { + this.entityName = entityName; + } + + public void unsetEntityName() { + this.entityName = null; + } + + /** 
Returns true if field entityName is set (has been assigned a value) and false otherwise */ + public boolean isSetEntityName() { + return this.entityName != null; + } + + public void setEntityNameIsSet(boolean value) { + if (!value) { + this.entityName = null; + } + } + + public String getPoolName() { + return this.poolName; + } + + public void setPoolName(String poolName) { + this.poolName = poolName; + } + + public void unsetPoolName() { + this.poolName = null; + } + + /** Returns true if field poolName is set (has been assigned a value) and false otherwise */ + public boolean isSetPoolName() { + return this.poolName != null; + } + + public void setPoolNameIsSet(boolean value) { + if (!value) { + this.poolName = null; + } + } + + public int getOrdering() { + return this.ordering; + } + + public void setOrdering(int ordering) { + this.ordering = ordering; + setOrderingIsSet(true); + } + + public void unsetOrdering() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ORDERING_ISSET_ID); + } + + /** Returns true if field ordering is set (has been assigned a value) and false otherwise */ + public boolean isSetOrdering() { + return EncodingUtils.testBit(__isset_bitfield, __ORDERING_ISSET_ID); + } + + public void setOrderingIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ORDERING_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RESOURCE_PLAN_NAME: + if (value == null) { + unsetResourcePlanName(); + } else { + setResourcePlanName((String)value); + } + break; + + case ENTITY_TYPE: + if (value == null) { + unsetEntityType(); + } else { + setEntityType((String)value); + } + break; + + case ENTITY_NAME: + if (value == null) { + unsetEntityName(); + } else { + setEntityName((String)value); + } + break; + + case POOL_NAME: + if (value == null) { + unsetPoolName(); + } else { + setPoolName((String)value); + } + break; + + case ORDERING: + if (value == null) { + 
unsetOrdering(); + } else { + setOrdering((Integer)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RESOURCE_PLAN_NAME: + return getResourcePlanName(); + + case ENTITY_TYPE: + return getEntityType(); + + case ENTITY_NAME: + return getEntityName(); + + case POOL_NAME: + return getPoolName(); + + case ORDERING: + return getOrdering(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case RESOURCE_PLAN_NAME: + return isSetResourcePlanName(); + case ENTITY_TYPE: + return isSetEntityType(); + case ENTITY_NAME: + return isSetEntityName(); + case POOL_NAME: + return isSetPoolName(); + case ORDERING: + return isSetOrdering(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof WMMapping) + return this.equals((WMMapping)that); + return false; + } + + public boolean equals(WMMapping that) { + if (that == null) + return false; + + boolean this_present_resourcePlanName = true && this.isSetResourcePlanName(); + boolean that_present_resourcePlanName = true && that.isSetResourcePlanName(); + if (this_present_resourcePlanName || that_present_resourcePlanName) { + if (!(this_present_resourcePlanName && that_present_resourcePlanName)) + return false; + if (!this.resourcePlanName.equals(that.resourcePlanName)) + return false; + } + + boolean this_present_entityType = true && this.isSetEntityType(); + boolean that_present_entityType = true && that.isSetEntityType(); + if (this_present_entityType || that_present_entityType) { + if (!(this_present_entityType && that_present_entityType)) + return false; + if (!this.entityType.equals(that.entityType)) + return false; + } + + boolean 
this_present_entityName = true && this.isSetEntityName(); + boolean that_present_entityName = true && that.isSetEntityName(); + if (this_present_entityName || that_present_entityName) { + if (!(this_present_entityName && that_present_entityName)) + return false; + if (!this.entityName.equals(that.entityName)) + return false; + } + + boolean this_present_poolName = true && this.isSetPoolName(); + boolean that_present_poolName = true && that.isSetPoolName(); + if (this_present_poolName || that_present_poolName) { + if (!(this_present_poolName && that_present_poolName)) + return false; + if (!this.poolName.equals(that.poolName)) + return false; + } + + boolean this_present_ordering = true && this.isSetOrdering(); + boolean that_present_ordering = true && that.isSetOrdering(); + if (this_present_ordering || that_present_ordering) { + if (!(this_present_ordering && that_present_ordering)) + return false; + if (this.ordering != that.ordering) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_resourcePlanName = true && (isSetResourcePlanName()); + list.add(present_resourcePlanName); + if (present_resourcePlanName) + list.add(resourcePlanName); + + boolean present_entityType = true && (isSetEntityType()); + list.add(present_entityType); + if (present_entityType) + list.add(entityType); + + boolean present_entityName = true && (isSetEntityName()); + list.add(present_entityName); + if (present_entityName) + list.add(entityName); + + boolean present_poolName = true && (isSetPoolName()); + list.add(present_poolName); + if (present_poolName) + list.add(poolName); + + boolean present_ordering = true && (isSetOrdering()); + list.add(present_ordering); + if (present_ordering) + list.add(ordering); + + return list.hashCode(); + } + + @Override + public int compareTo(WMMapping other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } 
+ + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetResourcePlanName()).compareTo(other.isSetResourcePlanName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetResourcePlanName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.resourcePlanName, other.resourcePlanName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetEntityType()).compareTo(other.isSetEntityType()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEntityType()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.entityType, other.entityType); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetEntityName()).compareTo(other.isSetEntityName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEntityName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.entityName, other.entityName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPoolName()).compareTo(other.isSetPoolName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPoolName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.poolName, other.poolName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetOrdering()).compareTo(other.isSetOrdering()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetOrdering()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ordering, other.ordering); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void 
write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("WMMapping("); + boolean first = true; + + sb.append("resourcePlanName:"); + if (this.resourcePlanName == null) { + sb.append("null"); + } else { + sb.append(this.resourcePlanName); + } + first = false; + if (!first) sb.append(", "); + sb.append("entityType:"); + if (this.entityType == null) { + sb.append("null"); + } else { + sb.append(this.entityType); + } + first = false; + if (!first) sb.append(", "); + sb.append("entityName:"); + if (this.entityName == null) { + sb.append("null"); + } else { + sb.append(this.entityName); + } + first = false; + if (isSetPoolName()) { + if (!first) sb.append(", "); + sb.append("poolName:"); + if (this.poolName == null) { + sb.append("null"); + } else { + sb.append(this.poolName); + } + first = false; + } + if (isSetOrdering()) { + if (!first) sb.append(", "); + sb.append("ordering:"); + sb.append(this.ordering); + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetResourcePlanName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'resourcePlanName' is unset! Struct:" + toString()); + } + + if (!isSetEntityType()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'entityType' is unset! Struct:" + toString()); + } + + if (!isSetEntityName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'entityName' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class WMMappingStandardSchemeFactory implements SchemeFactory { + public WMMappingStandardScheme getScheme() { + return new WMMappingStandardScheme(); + } + } + + private static class WMMappingStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, WMMapping struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RESOURCE_PLAN_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.resourcePlanName = iprot.readString(); + struct.setResourcePlanNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // ENTITY_TYPE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.entityType = iprot.readString(); + struct.setEntityTypeIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); + } + break; + case 3: // ENTITY_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.entityName = iprot.readString(); + struct.setEntityNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // POOL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.poolName = iprot.readString(); + struct.setPoolNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // ORDERING + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.ordering = iprot.readI32(); + struct.setOrderingIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, WMMapping struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.resourcePlanName != null) { + oprot.writeFieldBegin(RESOURCE_PLAN_NAME_FIELD_DESC); + oprot.writeString(struct.resourcePlanName); + oprot.writeFieldEnd(); + } + if (struct.entityType != null) { + oprot.writeFieldBegin(ENTITY_TYPE_FIELD_DESC); + oprot.writeString(struct.entityType); + oprot.writeFieldEnd(); + } + if (struct.entityName != null) { + oprot.writeFieldBegin(ENTITY_NAME_FIELD_DESC); + oprot.writeString(struct.entityName); + oprot.writeFieldEnd(); + } + if (struct.poolName != null) { + if (struct.isSetPoolName()) { + oprot.writeFieldBegin(POOL_NAME_FIELD_DESC); + oprot.writeString(struct.poolName); + oprot.writeFieldEnd(); + } + } + if (struct.isSetOrdering()) { + oprot.writeFieldBegin(ORDERING_FIELD_DESC); + oprot.writeI32(struct.ordering); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); 
+ oprot.writeStructEnd(); + } + + } + + private static class WMMappingTupleSchemeFactory implements SchemeFactory { + public WMMappingTupleScheme getScheme() { + return new WMMappingTupleScheme(); + } + } + + private static class WMMappingTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, WMMapping struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.resourcePlanName); + oprot.writeString(struct.entityType); + oprot.writeString(struct.entityName); + BitSet optionals = new BitSet(); + if (struct.isSetPoolName()) { + optionals.set(0); + } + if (struct.isSetOrdering()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetPoolName()) { + oprot.writeString(struct.poolName); + } + if (struct.isSetOrdering()) { + oprot.writeI32(struct.ordering); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, WMMapping struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.resourcePlanName = iprot.readString(); + struct.setResourcePlanNameIsSet(true); + struct.entityType = iprot.readString(); + struct.setEntityTypeIsSet(true); + struct.entityName = iprot.readString(); + struct.setEntityNameIsSet(true); + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.poolName = iprot.readString(); + struct.setPoolNameIsSet(true); + } + if (incoming.get(1)) { + struct.ordering = iprot.readI32(); + struct.setOrderingIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMPool.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMPool.java new file mode 100644 index 0000000000..a89e030370 --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMPool.java @@ -0,0 +1,908 @@ +/** + * 
Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class WMPool implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMPool"); + + private static final org.apache.thrift.protocol.TField RESOURCE_PLAN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("resourcePlanName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField POOL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("poolName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField PARENT_POOL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("parentPoolName", 
org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField ALLOC_FRACTION_FIELD_DESC = new org.apache.thrift.protocol.TField("allocFraction", org.apache.thrift.protocol.TType.DOUBLE, (short)4); + private static final org.apache.thrift.protocol.TField QUERY_PARALLELISM_FIELD_DESC = new org.apache.thrift.protocol.TField("queryParallelism", org.apache.thrift.protocol.TType.I32, (short)5); + private static final org.apache.thrift.protocol.TField SCHEDULING_POLICY_FIELD_DESC = new org.apache.thrift.protocol.TField("schedulingPolicy", org.apache.thrift.protocol.TType.STRING, (short)6); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new WMPoolStandardSchemeFactory()); + schemes.put(TupleScheme.class, new WMPoolTupleSchemeFactory()); + } + + private String resourcePlanName; // required + private String poolName; // required + private String parentPoolName; // optional + private double allocFraction; // optional + private int queryParallelism; // optional + private String schedulingPolicy; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RESOURCE_PLAN_NAME((short)1, "resourcePlanName"), + POOL_NAME((short)2, "poolName"), + PARENT_POOL_NAME((short)3, "parentPoolName"), + ALLOC_FRACTION((short)4, "allocFraction"), + QUERY_PARALLELISM((short)5, "queryParallelism"), + SCHEDULING_POLICY((short)6, "schedulingPolicy"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RESOURCE_PLAN_NAME + return RESOURCE_PLAN_NAME; + case 2: // POOL_NAME + return POOL_NAME; + case 3: // PARENT_POOL_NAME + return PARENT_POOL_NAME; + case 4: // ALLOC_FRACTION + return ALLOC_FRACTION; + case 5: // QUERY_PARALLELISM + return QUERY_PARALLELISM; + case 6: // SCHEDULING_POLICY + return SCHEDULING_POLICY; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __ALLOCFRACTION_ISSET_ID = 0; + private static final int __QUERYPARALLELISM_ISSET_ID = 1; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.PARENT_POOL_NAME,_Fields.ALLOC_FRACTION,_Fields.QUERY_PARALLELISM,_Fields.SCHEDULING_POLICY}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RESOURCE_PLAN_NAME, new org.apache.thrift.meta_data.FieldMetaData("resourcePlanName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.POOL_NAME, new org.apache.thrift.meta_data.FieldMetaData("poolName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PARENT_POOL_NAME, new org.apache.thrift.meta_data.FieldMetaData("parentPoolName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ALLOC_FRACTION, new org.apache.thrift.meta_data.FieldMetaData("allocFraction", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); + tmpMap.put(_Fields.QUERY_PARALLELISM, new org.apache.thrift.meta_data.FieldMetaData("queryParallelism", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.SCHEDULING_POLICY, new org.apache.thrift.meta_data.FieldMetaData("schedulingPolicy", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMPool.class, metaDataMap); + } + + public WMPool() { + } + + public WMPool( + String resourcePlanName, + String poolName) + { + this(); + this.resourcePlanName = resourcePlanName; + this.poolName = poolName; + } + + /** + * Performs a deep copy on other. 
+ */ + public WMPool(WMPool other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetResourcePlanName()) { + this.resourcePlanName = other.resourcePlanName; + } + if (other.isSetPoolName()) { + this.poolName = other.poolName; + } + if (other.isSetParentPoolName()) { + this.parentPoolName = other.parentPoolName; + } + this.allocFraction = other.allocFraction; + this.queryParallelism = other.queryParallelism; + if (other.isSetSchedulingPolicy()) { + this.schedulingPolicy = other.schedulingPolicy; + } + } + + public WMPool deepCopy() { + return new WMPool(this); + } + + @Override + public void clear() { + this.resourcePlanName = null; + this.poolName = null; + this.parentPoolName = null; + setAllocFractionIsSet(false); + this.allocFraction = 0.0; + setQueryParallelismIsSet(false); + this.queryParallelism = 0; + this.schedulingPolicy = null; + } + + public String getResourcePlanName() { + return this.resourcePlanName; + } + + public void setResourcePlanName(String resourcePlanName) { + this.resourcePlanName = resourcePlanName; + } + + public void unsetResourcePlanName() { + this.resourcePlanName = null; + } + + /** Returns true if field resourcePlanName is set (has been assigned a value) and false otherwise */ + public boolean isSetResourcePlanName() { + return this.resourcePlanName != null; + } + + public void setResourcePlanNameIsSet(boolean value) { + if (!value) { + this.resourcePlanName = null; + } + } + + public String getPoolName() { + return this.poolName; + } + + public void setPoolName(String poolName) { + this.poolName = poolName; + } + + public void unsetPoolName() { + this.poolName = null; + } + + /** Returns true if field poolName is set (has been assigned a value) and false otherwise */ + public boolean isSetPoolName() { + return this.poolName != null; + } + + public void setPoolNameIsSet(boolean value) { + if (!value) { + this.poolName = null; + } + } + + public String getParentPoolName() { + return this.parentPoolName; + } + + public void 
setParentPoolName(String parentPoolName) { + this.parentPoolName = parentPoolName; + } + + public void unsetParentPoolName() { + this.parentPoolName = null; + } + + /** Returns true if field parentPoolName is set (has been assigned a value) and false otherwise */ + public boolean isSetParentPoolName() { + return this.parentPoolName != null; + } + + public void setParentPoolNameIsSet(boolean value) { + if (!value) { + this.parentPoolName = null; + } + } + + public double getAllocFraction() { + return this.allocFraction; + } + + public void setAllocFraction(double allocFraction) { + this.allocFraction = allocFraction; + setAllocFractionIsSet(true); + } + + public void unsetAllocFraction() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ALLOCFRACTION_ISSET_ID); + } + + /** Returns true if field allocFraction is set (has been assigned a value) and false otherwise */ + public boolean isSetAllocFraction() { + return EncodingUtils.testBit(__isset_bitfield, __ALLOCFRACTION_ISSET_ID); + } + + public void setAllocFractionIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ALLOCFRACTION_ISSET_ID, value); + } + + public int getQueryParallelism() { + return this.queryParallelism; + } + + public void setQueryParallelism(int queryParallelism) { + this.queryParallelism = queryParallelism; + setQueryParallelismIsSet(true); + } + + public void unsetQueryParallelism() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __QUERYPARALLELISM_ISSET_ID); + } + + /** Returns true if field queryParallelism is set (has been assigned a value) and false otherwise */ + public boolean isSetQueryParallelism() { + return EncodingUtils.testBit(__isset_bitfield, __QUERYPARALLELISM_ISSET_ID); + } + + public void setQueryParallelismIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __QUERYPARALLELISM_ISSET_ID, value); + } + + public String getSchedulingPolicy() { + return this.schedulingPolicy; + } + + public 
void setSchedulingPolicy(String schedulingPolicy) { + this.schedulingPolicy = schedulingPolicy; + } + + public void unsetSchedulingPolicy() { + this.schedulingPolicy = null; + } + + /** Returns true if field schedulingPolicy is set (has been assigned a value) and false otherwise */ + public boolean isSetSchedulingPolicy() { + return this.schedulingPolicy != null; + } + + public void setSchedulingPolicyIsSet(boolean value) { + if (!value) { + this.schedulingPolicy = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RESOURCE_PLAN_NAME: + if (value == null) { + unsetResourcePlanName(); + } else { + setResourcePlanName((String)value); + } + break; + + case POOL_NAME: + if (value == null) { + unsetPoolName(); + } else { + setPoolName((String)value); + } + break; + + case PARENT_POOL_NAME: + if (value == null) { + unsetParentPoolName(); + } else { + setParentPoolName((String)value); + } + break; + + case ALLOC_FRACTION: + if (value == null) { + unsetAllocFraction(); + } else { + setAllocFraction((Double)value); + } + break; + + case QUERY_PARALLELISM: + if (value == null) { + unsetQueryParallelism(); + } else { + setQueryParallelism((Integer)value); + } + break; + + case SCHEDULING_POLICY: + if (value == null) { + unsetSchedulingPolicy(); + } else { + setSchedulingPolicy((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RESOURCE_PLAN_NAME: + return getResourcePlanName(); + + case POOL_NAME: + return getPoolName(); + + case PARENT_POOL_NAME: + return getParentPoolName(); + + case ALLOC_FRACTION: + return getAllocFraction(); + + case QUERY_PARALLELISM: + return getQueryParallelism(); + + case SCHEDULING_POLICY: + return getSchedulingPolicy(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) 
{ + throw new IllegalArgumentException(); + } + + switch (field) { + case RESOURCE_PLAN_NAME: + return isSetResourcePlanName(); + case POOL_NAME: + return isSetPoolName(); + case PARENT_POOL_NAME: + return isSetParentPoolName(); + case ALLOC_FRACTION: + return isSetAllocFraction(); + case QUERY_PARALLELISM: + return isSetQueryParallelism(); + case SCHEDULING_POLICY: + return isSetSchedulingPolicy(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof WMPool) + return this.equals((WMPool)that); + return false; + } + + public boolean equals(WMPool that) { + if (that == null) + return false; + + boolean this_present_resourcePlanName = true && this.isSetResourcePlanName(); + boolean that_present_resourcePlanName = true && that.isSetResourcePlanName(); + if (this_present_resourcePlanName || that_present_resourcePlanName) { + if (!(this_present_resourcePlanName && that_present_resourcePlanName)) + return false; + if (!this.resourcePlanName.equals(that.resourcePlanName)) + return false; + } + + boolean this_present_poolName = true && this.isSetPoolName(); + boolean that_present_poolName = true && that.isSetPoolName(); + if (this_present_poolName || that_present_poolName) { + if (!(this_present_poolName && that_present_poolName)) + return false; + if (!this.poolName.equals(that.poolName)) + return false; + } + + boolean this_present_parentPoolName = true && this.isSetParentPoolName(); + boolean that_present_parentPoolName = true && that.isSetParentPoolName(); + if (this_present_parentPoolName || that_present_parentPoolName) { + if (!(this_present_parentPoolName && that_present_parentPoolName)) + return false; + if (!this.parentPoolName.equals(that.parentPoolName)) + return false; + } + + boolean this_present_allocFraction = true && this.isSetAllocFraction(); + boolean that_present_allocFraction = true && that.isSetAllocFraction(); + if (this_present_allocFraction || 
that_present_allocFraction) { + if (!(this_present_allocFraction && that_present_allocFraction)) + return false; + if (this.allocFraction != that.allocFraction) + return false; + } + + boolean this_present_queryParallelism = true && this.isSetQueryParallelism(); + boolean that_present_queryParallelism = true && that.isSetQueryParallelism(); + if (this_present_queryParallelism || that_present_queryParallelism) { + if (!(this_present_queryParallelism && that_present_queryParallelism)) + return false; + if (this.queryParallelism != that.queryParallelism) + return false; + } + + boolean this_present_schedulingPolicy = true && this.isSetSchedulingPolicy(); + boolean that_present_schedulingPolicy = true && that.isSetSchedulingPolicy(); + if (this_present_schedulingPolicy || that_present_schedulingPolicy) { + if (!(this_present_schedulingPolicy && that_present_schedulingPolicy)) + return false; + if (!this.schedulingPolicy.equals(that.schedulingPolicy)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_resourcePlanName = true && (isSetResourcePlanName()); + list.add(present_resourcePlanName); + if (present_resourcePlanName) + list.add(resourcePlanName); + + boolean present_poolName = true && (isSetPoolName()); + list.add(present_poolName); + if (present_poolName) + list.add(poolName); + + boolean present_parentPoolName = true && (isSetParentPoolName()); + list.add(present_parentPoolName); + if (present_parentPoolName) + list.add(parentPoolName); + + boolean present_allocFraction = true && (isSetAllocFraction()); + list.add(present_allocFraction); + if (present_allocFraction) + list.add(allocFraction); + + boolean present_queryParallelism = true && (isSetQueryParallelism()); + list.add(present_queryParallelism); + if (present_queryParallelism) + list.add(queryParallelism); + + boolean present_schedulingPolicy = true && (isSetSchedulingPolicy()); + list.add(present_schedulingPolicy); + if 
(present_schedulingPolicy) + list.add(schedulingPolicy); + + return list.hashCode(); + } + + @Override + public int compareTo(WMPool other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetResourcePlanName()).compareTo(other.isSetResourcePlanName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetResourcePlanName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.resourcePlanName, other.resourcePlanName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPoolName()).compareTo(other.isSetPoolName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPoolName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.poolName, other.poolName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetParentPoolName()).compareTo(other.isSetParentPoolName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetParentPoolName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parentPoolName, other.parentPoolName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetAllocFraction()).compareTo(other.isSetAllocFraction()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetAllocFraction()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.allocFraction, other.allocFraction); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetQueryParallelism()).compareTo(other.isSetQueryParallelism()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetQueryParallelism()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.queryParallelism, other.queryParallelism); + if (lastComparison != 
0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetSchedulingPolicy()).compareTo(other.isSetSchedulingPolicy()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSchedulingPolicy()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schedulingPolicy, other.schedulingPolicy); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("WMPool("); + boolean first = true; + + sb.append("resourcePlanName:"); + if (this.resourcePlanName == null) { + sb.append("null"); + } else { + sb.append(this.resourcePlanName); + } + first = false; + if (!first) sb.append(", "); + sb.append("poolName:"); + if (this.poolName == null) { + sb.append("null"); + } else { + sb.append(this.poolName); + } + first = false; + if (isSetParentPoolName()) { + if (!first) sb.append(", "); + sb.append("parentPoolName:"); + if (this.parentPoolName == null) { + sb.append("null"); + } else { + sb.append(this.parentPoolName); + } + first = false; + } + if (isSetAllocFraction()) { + if (!first) sb.append(", "); + sb.append("allocFraction:"); + sb.append(this.allocFraction); + first = false; + } + if (isSetQueryParallelism()) { + if (!first) sb.append(", "); + sb.append("queryParallelism:"); + sb.append(this.queryParallelism); + first = false; + } + if (isSetSchedulingPolicy()) { + if (!first) sb.append(", "); + sb.append("schedulingPolicy:"); + if (this.schedulingPolicy == null) { + sb.append("null"); + } else { + 
sb.append(this.schedulingPolicy); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetResourcePlanName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'resourcePlanName' is unset! Struct:" + toString()); + } + + if (!isSetPoolName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'poolName' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class WMPoolStandardSchemeFactory implements SchemeFactory { + public WMPoolStandardScheme getScheme() { + return new WMPoolStandardScheme(); + } + } + + private static class WMPoolStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, WMPool struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RESOURCE_PLAN_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.resourcePlanName = iprot.readString(); + struct.setResourcePlanNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // POOL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.poolName = iprot.readString(); + struct.setPoolNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // PARENT_POOL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.parentPoolName = iprot.readString(); + struct.setParentPoolNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // ALLOC_FRACTION + if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { + struct.allocFraction = iprot.readDouble(); + struct.setAllocFractionIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // QUERY_PARALLELISM + if (schemeField.type == 
org.apache.thrift.protocol.TType.I32) { + struct.queryParallelism = iprot.readI32(); + struct.setQueryParallelismIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // SCHEDULING_POLICY + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.schedulingPolicy = iprot.readString(); + struct.setSchedulingPolicyIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, WMPool struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.resourcePlanName != null) { + oprot.writeFieldBegin(RESOURCE_PLAN_NAME_FIELD_DESC); + oprot.writeString(struct.resourcePlanName); + oprot.writeFieldEnd(); + } + if (struct.poolName != null) { + oprot.writeFieldBegin(POOL_NAME_FIELD_DESC); + oprot.writeString(struct.poolName); + oprot.writeFieldEnd(); + } + if (struct.parentPoolName != null) { + if (struct.isSetParentPoolName()) { + oprot.writeFieldBegin(PARENT_POOL_NAME_FIELD_DESC); + oprot.writeString(struct.parentPoolName); + oprot.writeFieldEnd(); + } + } + if (struct.isSetAllocFraction()) { + oprot.writeFieldBegin(ALLOC_FRACTION_FIELD_DESC); + oprot.writeDouble(struct.allocFraction); + oprot.writeFieldEnd(); + } + if (struct.isSetQueryParallelism()) { + oprot.writeFieldBegin(QUERY_PARALLELISM_FIELD_DESC); + oprot.writeI32(struct.queryParallelism); + oprot.writeFieldEnd(); + } + if (struct.schedulingPolicy != null) { + if (struct.isSetSchedulingPolicy()) { + oprot.writeFieldBegin(SCHEDULING_POLICY_FIELD_DESC); + oprot.writeString(struct.schedulingPolicy); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + 
private static class WMPoolTupleSchemeFactory implements SchemeFactory { + public WMPoolTupleScheme getScheme() { + return new WMPoolTupleScheme(); + } + } + + private static class WMPoolTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, WMPool struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.resourcePlanName); + oprot.writeString(struct.poolName); + BitSet optionals = new BitSet(); + if (struct.isSetParentPoolName()) { + optionals.set(0); + } + if (struct.isSetAllocFraction()) { + optionals.set(1); + } + if (struct.isSetQueryParallelism()) { + optionals.set(2); + } + if (struct.isSetSchedulingPolicy()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); + if (struct.isSetParentPoolName()) { + oprot.writeString(struct.parentPoolName); + } + if (struct.isSetAllocFraction()) { + oprot.writeDouble(struct.allocFraction); + } + if (struct.isSetQueryParallelism()) { + oprot.writeI32(struct.queryParallelism); + } + if (struct.isSetSchedulingPolicy()) { + oprot.writeString(struct.schedulingPolicy); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, WMPool struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.resourcePlanName = iprot.readString(); + struct.setResourcePlanNameIsSet(true); + struct.poolName = iprot.readString(); + struct.setPoolNameIsSet(true); + BitSet incoming = iprot.readBitSet(4); + if (incoming.get(0)) { + struct.parentPoolName = iprot.readString(); + struct.setParentPoolNameIsSet(true); + } + if (incoming.get(1)) { + struct.allocFraction = iprot.readDouble(); + struct.setAllocFractionIsSet(true); + } + if (incoming.get(2)) { + struct.queryParallelism = iprot.readI32(); + struct.setQueryParallelismIsSet(true); + } + if (incoming.get(3)) { + struct.schedulingPolicy = iprot.readString(); + struct.setSchedulingPolicyIsSet(true); + } + } + 
} + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMResourcePlan.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMResourcePlan.java new file mode 100644 index 0000000000..a012da1ea2 --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMResourcePlan.java @@ -0,0 +1,602 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class WMResourcePlan implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMResourcePlan"); + + private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField QUERY_PARALLELISM_FIELD_DESC = new org.apache.thrift.protocol.TField("queryParallelism", org.apache.thrift.protocol.TType.I32, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new WMResourcePlanStandardSchemeFactory()); + schemes.put(TupleScheme.class, new WMResourcePlanTupleSchemeFactory()); + } + + private String name; // required + private String status; // optional + private int queryParallelism; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + NAME((short)1, "name"), + STATUS((short)2, "status"), + QUERY_PARALLELISM((short)3, "queryParallelism"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // NAME + return NAME; + case 2: // STATUS + return STATUS; + case 3: // QUERY_PARALLELISM + return QUERY_PARALLELISM; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __QUERYPARALLELISM_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.STATUS,_Fields.QUERY_PARALLELISM}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.QUERY_PARALLELISM, new org.apache.thrift.meta_data.FieldMetaData("queryParallelism", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMResourcePlan.class, metaDataMap); + } + + public WMResourcePlan() { + } + + public WMResourcePlan( + String name) + { + this(); + this.name = name; + } + + /** + * Performs a deep copy on other. + */ + public WMResourcePlan(WMResourcePlan other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetName()) { + this.name = other.name; + } + if (other.isSetStatus()) { + this.status = other.status; + } + this.queryParallelism = other.queryParallelism; + } + + public WMResourcePlan deepCopy() { + return new WMResourcePlan(this); + } + + @Override + public void clear() { + this.name = null; + this.status = null; + setQueryParallelismIsSet(false); + this.queryParallelism = 0; + } + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + + public void unsetName() { + this.name = null; + } + + /** Returns true if field name is set (has been assigned a value) and false otherwise */ + public boolean isSetName() { + return this.name != null; + } + + public void setNameIsSet(boolean value) { + if (!value) { + this.name = null; + } + } + + public String getStatus() { + return this.status; + } + + public void setStatus(String status) { + this.status = status; + } + + public void unsetStatus() { + this.status = null; + } + + /** Returns true if field status is set (has been assigned a value) and false otherwise */ + public boolean isSetStatus() { + return this.status != null; + } + + public void setStatusIsSet(boolean value) { + if (!value) { + this.status = null; + } + } + + public int getQueryParallelism() { + return this.queryParallelism; + } + + public void setQueryParallelism(int queryParallelism) { + this.queryParallelism = queryParallelism; + setQueryParallelismIsSet(true); + } + + public void unsetQueryParallelism() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __QUERYPARALLELISM_ISSET_ID); + } + + /** Returns true if 
field queryParallelism is set (has been assigned a value) and false otherwise */ + public boolean isSetQueryParallelism() { + return EncodingUtils.testBit(__isset_bitfield, __QUERYPARALLELISM_ISSET_ID); + } + + public void setQueryParallelismIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __QUERYPARALLELISM_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case NAME: + if (value == null) { + unsetName(); + } else { + setName((String)value); + } + break; + + case STATUS: + if (value == null) { + unsetStatus(); + } else { + setStatus((String)value); + } + break; + + case QUERY_PARALLELISM: + if (value == null) { + unsetQueryParallelism(); + } else { + setQueryParallelism((Integer)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case NAME: + return getName(); + + case STATUS: + return getStatus(); + + case QUERY_PARALLELISM: + return getQueryParallelism(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case NAME: + return isSetName(); + case STATUS: + return isSetStatus(); + case QUERY_PARALLELISM: + return isSetQueryParallelism(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof WMResourcePlan) + return this.equals((WMResourcePlan)that); + return false; + } + + public boolean equals(WMResourcePlan that) { + if (that == null) + return false; + + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) + return false; + if 
(!this.name.equals(that.name)) + return false; + } + + boolean this_present_status = true && this.isSetStatus(); + boolean that_present_status = true && that.isSetStatus(); + if (this_present_status || that_present_status) { + if (!(this_present_status && that_present_status)) + return false; + if (!this.status.equals(that.status)) + return false; + } + + boolean this_present_queryParallelism = true && this.isSetQueryParallelism(); + boolean that_present_queryParallelism = true && that.isSetQueryParallelism(); + if (this_present_queryParallelism || that_present_queryParallelism) { + if (!(this_present_queryParallelism && that_present_queryParallelism)) + return false; + if (this.queryParallelism != that.queryParallelism) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_name = true && (isSetName()); + list.add(present_name); + if (present_name) + list.add(name); + + boolean present_status = true && (isSetStatus()); + list.add(present_status); + if (present_status) + list.add(status); + + boolean present_queryParallelism = true && (isSetQueryParallelism()); + list.add(present_queryParallelism); + if (present_queryParallelism) + list.add(queryParallelism); + + return list.hashCode(); + } + + @Override + public int compareTo(WMResourcePlan other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetStatus()) { + lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetQueryParallelism()).compareTo(other.isSetQueryParallelism()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetQueryParallelism()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.queryParallelism, other.queryParallelism); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("WMResourcePlan("); + boolean first = true; + + sb.append("name:"); + if (this.name == null) { + sb.append("null"); + } else { + sb.append(this.name); + } + first = false; + if (isSetStatus()) { + if (!first) sb.append(", "); + sb.append("status:"); + if (this.status == null) { + sb.append("null"); + } else { + sb.append(this.status); + } + first = false; + } + if (isSetQueryParallelism()) { + if (!first) sb.append(", "); + sb.append("queryParallelism:"); + sb.append(this.queryParallelism); + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class WMResourcePlanStandardSchemeFactory implements SchemeFactory { + public WMResourcePlanStandardScheme getScheme() { + return new WMResourcePlanStandardScheme(); + } + } + + private static class WMResourcePlanStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, WMResourcePlan struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // STATUS + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.status = iprot.readString(); + struct.setStatusIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + 
case 3: // QUERY_PARALLELISM + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.queryParallelism = iprot.readI32(); + struct.setQueryParallelismIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, WMResourcePlan struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.name != null) { + oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(struct.name); + oprot.writeFieldEnd(); + } + if (struct.status != null) { + if (struct.isSetStatus()) { + oprot.writeFieldBegin(STATUS_FIELD_DESC); + oprot.writeString(struct.status); + oprot.writeFieldEnd(); + } + } + if (struct.isSetQueryParallelism()) { + oprot.writeFieldBegin(QUERY_PARALLELISM_FIELD_DESC); + oprot.writeI32(struct.queryParallelism); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class WMResourcePlanTupleSchemeFactory implements SchemeFactory { + public WMResourcePlanTupleScheme getScheme() { + return new WMResourcePlanTupleScheme(); + } + } + + private static class WMResourcePlanTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, WMResourcePlan struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.name); + BitSet optionals = new BitSet(); + if (struct.isSetStatus()) { + optionals.set(0); + } + if (struct.isSetQueryParallelism()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetStatus()) { + oprot.writeString(struct.status); + } + if (struct.isSetQueryParallelism()) { + oprot.writeI32(struct.queryParallelism); + } + } + 
+ @Override + public void read(org.apache.thrift.protocol.TProtocol prot, WMResourcePlan struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.name = iprot.readString(); + struct.setNameIsSet(true); + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.status = iprot.readString(); + struct.setStatusIsSet(true); + } + if (incoming.get(1)) { + struct.queryParallelism = iprot.readI32(); + struct.setQueryParallelismIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMTrigger.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMTrigger.java new file mode 100644 index 0000000000..848b8c2c9f --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMTrigger.java @@ -0,0 +1,706 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class WMTrigger implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMTrigger"); + + private static final org.apache.thrift.protocol.TField RESOURCE_PLAN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("resourcePlanName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField POOL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("poolName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TRIGGER_EXPRESSION_FIELD_DESC = new org.apache.thrift.protocol.TField("triggerExpression", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField ACTION_EXPRESSION_FIELD_DESC = new org.apache.thrift.protocol.TField("actionExpression", org.apache.thrift.protocol.TType.STRING, (short)4); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new WMTriggerStandardSchemeFactory()); + schemes.put(TupleScheme.class, new WMTriggerTupleSchemeFactory()); + } + + private String resourcePlanName; // required + private String poolName; // required + private String triggerExpression; // optional + private String actionExpression; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RESOURCE_PLAN_NAME((short)1, "resourcePlanName"), + POOL_NAME((short)2, "poolName"), + TRIGGER_EXPRESSION((short)3, "triggerExpression"), + ACTION_EXPRESSION((short)4, "actionExpression"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RESOURCE_PLAN_NAME + return RESOURCE_PLAN_NAME; + case 2: // POOL_NAME + return POOL_NAME; + case 3: // TRIGGER_EXPRESSION + return TRIGGER_EXPRESSION; + case 4: // ACTION_EXPRESSION + return ACTION_EXPRESSION; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final _Fields optionals[] = {_Fields.TRIGGER_EXPRESSION,_Fields.ACTION_EXPRESSION}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RESOURCE_PLAN_NAME, new org.apache.thrift.meta_data.FieldMetaData("resourcePlanName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.POOL_NAME, new org.apache.thrift.meta_data.FieldMetaData("poolName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TRIGGER_EXPRESSION, new org.apache.thrift.meta_data.FieldMetaData("triggerExpression", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ACTION_EXPRESSION, new org.apache.thrift.meta_data.FieldMetaData("actionExpression", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMTrigger.class, metaDataMap); + } + + public WMTrigger() { + } + + public WMTrigger( + String resourcePlanName, + 
String poolName) + { + this(); + this.resourcePlanName = resourcePlanName; + this.poolName = poolName; + } + + /** + * Performs a deep copy on other. + */ + public WMTrigger(WMTrigger other) { + if (other.isSetResourcePlanName()) { + this.resourcePlanName = other.resourcePlanName; + } + if (other.isSetPoolName()) { + this.poolName = other.poolName; + } + if (other.isSetTriggerExpression()) { + this.triggerExpression = other.triggerExpression; + } + if (other.isSetActionExpression()) { + this.actionExpression = other.actionExpression; + } + } + + public WMTrigger deepCopy() { + return new WMTrigger(this); + } + + @Override + public void clear() { + this.resourcePlanName = null; + this.poolName = null; + this.triggerExpression = null; + this.actionExpression = null; + } + + public String getResourcePlanName() { + return this.resourcePlanName; + } + + public void setResourcePlanName(String resourcePlanName) { + this.resourcePlanName = resourcePlanName; + } + + public void unsetResourcePlanName() { + this.resourcePlanName = null; + } + + /** Returns true if field resourcePlanName is set (has been assigned a value) and false otherwise */ + public boolean isSetResourcePlanName() { + return this.resourcePlanName != null; + } + + public void setResourcePlanNameIsSet(boolean value) { + if (!value) { + this.resourcePlanName = null; + } + } + + public String getPoolName() { + return this.poolName; + } + + public void setPoolName(String poolName) { + this.poolName = poolName; + } + + public void unsetPoolName() { + this.poolName = null; + } + + /** Returns true if field poolName is set (has been assigned a value) and false otherwise */ + public boolean isSetPoolName() { + return this.poolName != null; + } + + public void setPoolNameIsSet(boolean value) { + if (!value) { + this.poolName = null; + } + } + + public String getTriggerExpression() { + return this.triggerExpression; + } + + public void setTriggerExpression(String triggerExpression) { + this.triggerExpression = 
triggerExpression; + } + + public void unsetTriggerExpression() { + this.triggerExpression = null; + } + + /** Returns true if field triggerExpression is set (has been assigned a value) and false otherwise */ + public boolean isSetTriggerExpression() { + return this.triggerExpression != null; + } + + public void setTriggerExpressionIsSet(boolean value) { + if (!value) { + this.triggerExpression = null; + } + } + + public String getActionExpression() { + return this.actionExpression; + } + + public void setActionExpression(String actionExpression) { + this.actionExpression = actionExpression; + } + + public void unsetActionExpression() { + this.actionExpression = null; + } + + /** Returns true if field actionExpression is set (has been assigned a value) and false otherwise */ + public boolean isSetActionExpression() { + return this.actionExpression != null; + } + + public void setActionExpressionIsSet(boolean value) { + if (!value) { + this.actionExpression = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RESOURCE_PLAN_NAME: + if (value == null) { + unsetResourcePlanName(); + } else { + setResourcePlanName((String)value); + } + break; + + case POOL_NAME: + if (value == null) { + unsetPoolName(); + } else { + setPoolName((String)value); + } + break; + + case TRIGGER_EXPRESSION: + if (value == null) { + unsetTriggerExpression(); + } else { + setTriggerExpression((String)value); + } + break; + + case ACTION_EXPRESSION: + if (value == null) { + unsetActionExpression(); + } else { + setActionExpression((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RESOURCE_PLAN_NAME: + return getResourcePlanName(); + + case POOL_NAME: + return getPoolName(); + + case TRIGGER_EXPRESSION: + return getTriggerExpression(); + + case ACTION_EXPRESSION: + return getActionExpression(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding 
to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case RESOURCE_PLAN_NAME: + return isSetResourcePlanName(); + case POOL_NAME: + return isSetPoolName(); + case TRIGGER_EXPRESSION: + return isSetTriggerExpression(); + case ACTION_EXPRESSION: + return isSetActionExpression(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof WMTrigger) + return this.equals((WMTrigger)that); + return false; + } + + public boolean equals(WMTrigger that) { + if (that == null) + return false; + + boolean this_present_resourcePlanName = true && this.isSetResourcePlanName(); + boolean that_present_resourcePlanName = true && that.isSetResourcePlanName(); + if (this_present_resourcePlanName || that_present_resourcePlanName) { + if (!(this_present_resourcePlanName && that_present_resourcePlanName)) + return false; + if (!this.resourcePlanName.equals(that.resourcePlanName)) + return false; + } + + boolean this_present_poolName = true && this.isSetPoolName(); + boolean that_present_poolName = true && that.isSetPoolName(); + if (this_present_poolName || that_present_poolName) { + if (!(this_present_poolName && that_present_poolName)) + return false; + if (!this.poolName.equals(that.poolName)) + return false; + } + + boolean this_present_triggerExpression = true && this.isSetTriggerExpression(); + boolean that_present_triggerExpression = true && that.isSetTriggerExpression(); + if (this_present_triggerExpression || that_present_triggerExpression) { + if (!(this_present_triggerExpression && that_present_triggerExpression)) + return false; + if (!this.triggerExpression.equals(that.triggerExpression)) + return false; + } + + boolean this_present_actionExpression = true && this.isSetActionExpression(); + boolean that_present_actionExpression = true 
&& that.isSetActionExpression(); + if (this_present_actionExpression || that_present_actionExpression) { + if (!(this_present_actionExpression && that_present_actionExpression)) + return false; + if (!this.actionExpression.equals(that.actionExpression)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_resourcePlanName = true && (isSetResourcePlanName()); + list.add(present_resourcePlanName); + if (present_resourcePlanName) + list.add(resourcePlanName); + + boolean present_poolName = true && (isSetPoolName()); + list.add(present_poolName); + if (present_poolName) + list.add(poolName); + + boolean present_triggerExpression = true && (isSetTriggerExpression()); + list.add(present_triggerExpression); + if (present_triggerExpression) + list.add(triggerExpression); + + boolean present_actionExpression = true && (isSetActionExpression()); + list.add(present_actionExpression); + if (present_actionExpression) + list.add(actionExpression); + + return list.hashCode(); + } + + @Override + public int compareTo(WMTrigger other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetResourcePlanName()).compareTo(other.isSetResourcePlanName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetResourcePlanName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.resourcePlanName, other.resourcePlanName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPoolName()).compareTo(other.isSetPoolName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPoolName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.poolName, other.poolName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = 
Boolean.valueOf(isSetTriggerExpression()).compareTo(other.isSetTriggerExpression()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTriggerExpression()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.triggerExpression, other.triggerExpression); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetActionExpression()).compareTo(other.isSetActionExpression()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetActionExpression()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.actionExpression, other.actionExpression); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("WMTrigger("); + boolean first = true; + + sb.append("resourcePlanName:"); + if (this.resourcePlanName == null) { + sb.append("null"); + } else { + sb.append(this.resourcePlanName); + } + first = false; + if (!first) sb.append(", "); + sb.append("poolName:"); + if (this.poolName == null) { + sb.append("null"); + } else { + sb.append(this.poolName); + } + first = false; + if (isSetTriggerExpression()) { + if (!first) sb.append(", "); + sb.append("triggerExpression:"); + if (this.triggerExpression == null) { + sb.append("null"); + } else { + sb.append(this.triggerExpression); + } + first = false; + } + if (isSetActionExpression()) { + if (!first) sb.append(", "); + sb.append("actionExpression:"); + if (this.actionExpression == null) { + 
sb.append("null"); + } else { + sb.append(this.actionExpression); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetResourcePlanName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'resourcePlanName' is unset! Struct:" + toString()); + } + + if (!isSetPoolName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'poolName' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class WMTriggerStandardSchemeFactory implements SchemeFactory { + public WMTriggerStandardScheme getScheme() { + return new WMTriggerStandardScheme(); + } + } + + private static class WMTriggerStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, WMTrigger struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RESOURCE_PLAN_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.resourcePlanName = iprot.readString(); + 
struct.setResourcePlanNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // POOL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.poolName = iprot.readString(); + struct.setPoolNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // TRIGGER_EXPRESSION + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.triggerExpression = iprot.readString(); + struct.setTriggerExpressionIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // ACTION_EXPRESSION + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.actionExpression = iprot.readString(); + struct.setActionExpressionIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, WMTrigger struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.resourcePlanName != null) { + oprot.writeFieldBegin(RESOURCE_PLAN_NAME_FIELD_DESC); + oprot.writeString(struct.resourcePlanName); + oprot.writeFieldEnd(); + } + if (struct.poolName != null) { + oprot.writeFieldBegin(POOL_NAME_FIELD_DESC); + oprot.writeString(struct.poolName); + oprot.writeFieldEnd(); + } + if (struct.triggerExpression != null) { + if (struct.isSetTriggerExpression()) { + oprot.writeFieldBegin(TRIGGER_EXPRESSION_FIELD_DESC); + oprot.writeString(struct.triggerExpression); + oprot.writeFieldEnd(); + } + } + if (struct.actionExpression != null) { + if (struct.isSetActionExpression()) { + 
oprot.writeFieldBegin(ACTION_EXPRESSION_FIELD_DESC); + oprot.writeString(struct.actionExpression); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class WMTriggerTupleSchemeFactory implements SchemeFactory { + public WMTriggerTupleScheme getScheme() { + return new WMTriggerTupleScheme(); + } + } + + private static class WMTriggerTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, WMTrigger struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.resourcePlanName); + oprot.writeString(struct.poolName); + BitSet optionals = new BitSet(); + if (struct.isSetTriggerExpression()) { + optionals.set(0); + } + if (struct.isSetActionExpression()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetTriggerExpression()) { + oprot.writeString(struct.triggerExpression); + } + if (struct.isSetActionExpression()) { + oprot.writeString(struct.actionExpression); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, WMTrigger struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.resourcePlanName = iprot.readString(); + struct.setResourcePlanNameIsSet(true); + struct.poolName = iprot.readString(); + struct.setPoolNameIsSet(true); + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.triggerExpression = iprot.readString(); + struct.setTriggerExpressionIsSet(true); + } + if (incoming.get(1)) { + struct.actionExpression = iprot.readString(); + struct.setActionExpressionIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php index 8cf9e33853..f2d8826e68 100644 --- standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php +++ 
standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -20430,6 +20430,628 @@ class TableMeta { } +class WMResourcePlan { + static $_TSPEC; + + /** + * @var string + */ + public $name = null; + /** + * @var string + */ + public $status = null; + /** + * @var int + */ + public $queryParallelism = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'status', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'queryParallelism', + 'type' => TType::I32, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + if (isset($vals['status'])) { + $this->status = $vals['status']; + } + if (isset($vals['queryParallelism'])) { + $this->queryParallelism = $vals['queryParallelism']; + } + } + } + + public function getName() { + return 'WMResourcePlan'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->status); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->queryParallelism); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('WMResourcePlan'); + if ($this->name !== null) { + $xfer += 
$output->writeFieldBegin('name', TType::STRING, 1); + $xfer += $output->writeString($this->name); + $xfer += $output->writeFieldEnd(); + } + if ($this->status !== null) { + $xfer += $output->writeFieldBegin('status', TType::STRING, 2); + $xfer += $output->writeString($this->status); + $xfer += $output->writeFieldEnd(); + } + if ($this->queryParallelism !== null) { + $xfer += $output->writeFieldBegin('queryParallelism', TType::I32, 3); + $xfer += $output->writeI32($this->queryParallelism); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class WMPool { + static $_TSPEC; + + /** + * @var string + */ + public $resourcePlanName = null; + /** + * @var string + */ + public $poolName = null; + /** + * @var string + */ + public $parentPoolName = null; + /** + * @var double + */ + public $allocFraction = null; + /** + * @var int + */ + public $queryParallelism = null; + /** + * @var string + */ + public $schedulingPolicy = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'resourcePlanName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'poolName', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'parentPoolName', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'allocFraction', + 'type' => TType::DOUBLE, + ), + 5 => array( + 'var' => 'queryParallelism', + 'type' => TType::I32, + ), + 6 => array( + 'var' => 'schedulingPolicy', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['resourcePlanName'])) { + $this->resourcePlanName = $vals['resourcePlanName']; + } + if (isset($vals['poolName'])) { + $this->poolName = $vals['poolName']; + } + if (isset($vals['parentPoolName'])) { + $this->parentPoolName = $vals['parentPoolName']; + } + if (isset($vals['allocFraction'])) { + $this->allocFraction = $vals['allocFraction']; + } + if 
(isset($vals['queryParallelism'])) { + $this->queryParallelism = $vals['queryParallelism']; + } + if (isset($vals['schedulingPolicy'])) { + $this->schedulingPolicy = $vals['schedulingPolicy']; + } + } + } + + public function getName() { + return 'WMPool'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->resourcePlanName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->poolName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->parentPoolName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::DOUBLE) { + $xfer += $input->readDouble($this->allocFraction); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->queryParallelism); + } else { + $xfer += $input->skip($ftype); + } + break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->schedulingPolicy); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('WMPool'); + if ($this->resourcePlanName !== null) { + $xfer += $output->writeFieldBegin('resourcePlanName', TType::STRING, 1); + $xfer += $output->writeString($this->resourcePlanName); + $xfer += $output->writeFieldEnd(); + } + if ($this->poolName !== null) { + $xfer += 
$output->writeFieldBegin('poolName', TType::STRING, 2); + $xfer += $output->writeString($this->poolName); + $xfer += $output->writeFieldEnd(); + } + if ($this->parentPoolName !== null) { + $xfer += $output->writeFieldBegin('parentPoolName', TType::STRING, 3); + $xfer += $output->writeString($this->parentPoolName); + $xfer += $output->writeFieldEnd(); + } + if ($this->allocFraction !== null) { + $xfer += $output->writeFieldBegin('allocFraction', TType::DOUBLE, 4); + $xfer += $output->writeDouble($this->allocFraction); + $xfer += $output->writeFieldEnd(); + } + if ($this->queryParallelism !== null) { + $xfer += $output->writeFieldBegin('queryParallelism', TType::I32, 5); + $xfer += $output->writeI32($this->queryParallelism); + $xfer += $output->writeFieldEnd(); + } + if ($this->schedulingPolicy !== null) { + $xfer += $output->writeFieldBegin('schedulingPolicy', TType::STRING, 6); + $xfer += $output->writeString($this->schedulingPolicy); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class WMTrigger { + static $_TSPEC; + + /** + * @var string + */ + public $resourcePlanName = null; + /** + * @var string + */ + public $poolName = null; + /** + * @var string + */ + public $triggerExpression = null; + /** + * @var string + */ + public $actionExpression = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'resourcePlanName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'poolName', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'triggerExpression', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'actionExpression', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['resourcePlanName'])) { + $this->resourcePlanName = $vals['resourcePlanName']; + } + if (isset($vals['poolName'])) { + $this->poolName = $vals['poolName']; + } + if 
(isset($vals['triggerExpression'])) { + $this->triggerExpression = $vals['triggerExpression']; + } + if (isset($vals['actionExpression'])) { + $this->actionExpression = $vals['actionExpression']; + } + } + } + + public function getName() { + return 'WMTrigger'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->resourcePlanName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->poolName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->triggerExpression); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->actionExpression); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('WMTrigger'); + if ($this->resourcePlanName !== null) { + $xfer += $output->writeFieldBegin('resourcePlanName', TType::STRING, 1); + $xfer += $output->writeString($this->resourcePlanName); + $xfer += $output->writeFieldEnd(); + } + if ($this->poolName !== null) { + $xfer += $output->writeFieldBegin('poolName', TType::STRING, 2); + $xfer += $output->writeString($this->poolName); + $xfer += $output->writeFieldEnd(); + } + if ($this->triggerExpression !== null) { + $xfer += $output->writeFieldBegin('triggerExpression', TType::STRING, 3); + $xfer += $output->writeString($this->triggerExpression); 
+ $xfer += $output->writeFieldEnd(); + } + if ($this->actionExpression !== null) { + $xfer += $output->writeFieldBegin('actionExpression', TType::STRING, 4); + $xfer += $output->writeString($this->actionExpression); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class WMMapping { + static $_TSPEC; + + /** + * @var string + */ + public $resourcePlanName = null; + /** + * @var string + */ + public $entityType = null; + /** + * @var string + */ + public $entityName = null; + /** + * @var string + */ + public $poolName = null; + /** + * @var int + */ + public $ordering = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'resourcePlanName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'entityType', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'entityName', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'poolName', + 'type' => TType::STRING, + ), + 5 => array( + 'var' => 'ordering', + 'type' => TType::I32, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['resourcePlanName'])) { + $this->resourcePlanName = $vals['resourcePlanName']; + } + if (isset($vals['entityType'])) { + $this->entityType = $vals['entityType']; + } + if (isset($vals['entityName'])) { + $this->entityName = $vals['entityName']; + } + if (isset($vals['poolName'])) { + $this->poolName = $vals['poolName']; + } + if (isset($vals['ordering'])) { + $this->ordering = $vals['ordering']; + } + } + } + + public function getName() { + return 'WMMapping'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += 
$input->readString($this->resourcePlanName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->entityType); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->entityName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->poolName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->ordering); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('WMMapping'); + if ($this->resourcePlanName !== null) { + $xfer += $output->writeFieldBegin('resourcePlanName', TType::STRING, 1); + $xfer += $output->writeString($this->resourcePlanName); + $xfer += $output->writeFieldEnd(); + } + if ($this->entityType !== null) { + $xfer += $output->writeFieldBegin('entityType', TType::STRING, 2); + $xfer += $output->writeString($this->entityType); + $xfer += $output->writeFieldEnd(); + } + if ($this->entityName !== null) { + $xfer += $output->writeFieldBegin('entityName', TType::STRING, 3); + $xfer += $output->writeString($this->entityName); + $xfer += $output->writeFieldEnd(); + } + if ($this->poolName !== null) { + $xfer += $output->writeFieldBegin('poolName', TType::STRING, 4); + $xfer += $output->writeString($this->poolName); + $xfer += $output->writeFieldEnd(); + } + if ($this->ordering !== null) { + $xfer += $output->writeFieldBegin('ordering', TType::I32, 5); + $xfer += $output->writeI32($this->ordering); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += 
$output->writeStructEnd(); + return $xfer; + } + +} + class MetaException extends TException { static $_TSPEC; diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index c67a7816da..1ee7fe7768 100644 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -14402,6 +14402,464 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class WMResourcePlan: + """ + Attributes: + - name + - status + - queryParallelism + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'name', None, None, ), # 1 + (2, TType.STRING, 'status', None, None, ), # 2 + (3, TType.I32, 'queryParallelism', None, None, ), # 3 + ) + + def __init__(self, name=None, status=None, queryParallelism=None,): + self.name = name + self.status = status + self.queryParallelism = queryParallelism + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.status = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.queryParallelism = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + 
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('WMResourcePlan') + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 1) + oprot.writeString(self.name) + oprot.writeFieldEnd() + if self.status is not None: + oprot.writeFieldBegin('status', TType.STRING, 2) + oprot.writeString(self.status) + oprot.writeFieldEnd() + if self.queryParallelism is not None: + oprot.writeFieldBegin('queryParallelism', TType.I32, 3) + oprot.writeI32(self.queryParallelism) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.name is None: + raise TProtocol.TProtocolException(message='Required field name is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.name) + value = (value * 31) ^ hash(self.status) + value = (value * 31) ^ hash(self.queryParallelism) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class WMPool: + """ + Attributes: + - resourcePlanName + - poolName + - parentPoolName + - allocFraction + - queryParallelism + - schedulingPolicy + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'resourcePlanName', None, None, ), # 1 + (2, TType.STRING, 'poolName', None, None, ), # 2 + (3, TType.STRING, 'parentPoolName', None, None, ), # 3 + (4, TType.DOUBLE, 'allocFraction', None, None, ), # 4 + (5, TType.I32, 'queryParallelism', None, None, ), # 5 + (6, TType.STRING, 'schedulingPolicy', None, None, ), # 6 + ) + + def __init__(self, resourcePlanName=None, poolName=None, parentPoolName=None, allocFraction=None, queryParallelism=None, schedulingPolicy=None,): + self.resourcePlanName = resourcePlanName + 
self.poolName = poolName + self.parentPoolName = parentPoolName + self.allocFraction = allocFraction + self.queryParallelism = queryParallelism + self.schedulingPolicy = schedulingPolicy + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.resourcePlanName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.poolName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.parentPoolName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.DOUBLE: + self.allocFraction = iprot.readDouble() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.queryParallelism = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.schedulingPolicy = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('WMPool') + if self.resourcePlanName is not None: + oprot.writeFieldBegin('resourcePlanName', TType.STRING, 1) + oprot.writeString(self.resourcePlanName) + oprot.writeFieldEnd() + if self.poolName is not None: + oprot.writeFieldBegin('poolName', TType.STRING, 2) + oprot.writeString(self.poolName) + oprot.writeFieldEnd() + if self.parentPoolName 
# NOTE(review): This class was generated by the Apache Thrift compiler for the
# Python 2 target (see __repr__'s use of dict.iteritems). Do not hand-edit;
# regenerate from hive_metastore.thrift (struct WMTrigger) instead.
class WMTrigger:
  """
  Attributes:
   - resourcePlanName
   - poolName
   - triggerExpression
   - actionExpression
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'resourcePlanName', None, None, ), # 1
    (2, TType.STRING, 'poolName', None, None, ), # 2
    (3, TType.STRING, 'triggerExpression', None, None, ), # 3
    (4, TType.STRING, 'actionExpression', None, None, ), # 4
  )

  def __init__(self, resourcePlanName=None, poolName=None, triggerExpression=None, actionExpression=None,):
    self.resourcePlanName = resourcePlanName
    self.poolName = poolName
    self.triggerExpression = triggerExpression
    self.actionExpression = actionExpression

  def read(self, iprot):
    # Fast path: C-accelerated decode when the accelerated binary protocol and
    # the fastbinary extension are both available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: generic field-by-field decode; unknown field ids are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.resourcePlanName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.poolName = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.triggerExpression = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.actionExpression = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path mirror of read(); otherwise encode field by field. Fields that
    # are None are simply not written (thrift "unset").
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('WMTrigger')
    if self.resourcePlanName is not None:
      oprot.writeFieldBegin('resourcePlanName', TType.STRING, 1)
      oprot.writeString(self.resourcePlanName)
      oprot.writeFieldEnd()
    if self.poolName is not None:
      oprot.writeFieldBegin('poolName', TType.STRING, 2)
      oprot.writeString(self.poolName)
      oprot.writeFieldEnd()
    if self.triggerExpression is not None:
      oprot.writeFieldBegin('triggerExpression', TType.STRING, 3)
      oprot.writeString(self.triggerExpression)
      oprot.writeFieldEnd()
    if self.actionExpression is not None:
      oprot.writeFieldBegin('actionExpression', TType.STRING, 4)
      oprot.writeString(self.actionExpression)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # resourcePlanName and poolName are declared 'required' in the IDL.
    if self.resourcePlanName is None:
      raise TProtocol.TProtocolException(message='Required field resourcePlanName is unset!')
    if self.poolName is None:
      raise TProtocol.TProtocolException(message='Required field poolName is unset!')
    return


  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.resourcePlanName)
    value = (value * 31) ^ hash(self.poolName)
    value = (value * 31) ^ hash(self.triggerExpression)
    value = (value * 31) ^ hash(self.actionExpression)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
# NOTE(review): Autogenerated by the Apache Thrift compiler from
# hive_metastore.thrift; do not hand-edit — regenerate instead. These structs
# back the Hive workload-management (WM) API.

# A named workload-management resource plan ('name' is required).
class WMResourcePlan
  include ::Thrift::Struct, ::Thrift::Struct_Union
  NAME = 1
  STATUS = 2
  QUERYPARALLELISM = 3

  FIELDS = {
    NAME => {:type => ::Thrift::Types::STRING, :name => 'name'},
    STATUS => {:type => ::Thrift::Types::STRING, :name => 'status', :optional => true},
    QUERYPARALLELISM => {:type => ::Thrift::Types::I32, :name => 'queryParallelism', :optional => true}
  }

  def struct_fields; FIELDS; end

  # Enforces fields marked 'required' in the IDL.
  def validate
    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field name is unset!') unless @name
  end

  ::Thrift::Struct.generate_accessors self
end

# A pool inside a resource plan; identified by (resourcePlanName, poolName).
class WMPool
  include ::Thrift::Struct, ::Thrift::Struct_Union
  RESOURCEPLANNAME = 1
  POOLNAME = 2
  PARENTPOOLNAME = 3
  ALLOCFRACTION = 4
  QUERYPARALLELISM = 5
  SCHEDULINGPOLICY = 6

  FIELDS = {
    RESOURCEPLANNAME => {:type => ::Thrift::Types::STRING, :name => 'resourcePlanName'},
    POOLNAME => {:type => ::Thrift::Types::STRING, :name => 'poolName'},
    PARENTPOOLNAME => {:type => ::Thrift::Types::STRING, :name => 'parentPoolName', :optional => true},
    ALLOCFRACTION => {:type => ::Thrift::Types::DOUBLE, :name => 'allocFraction', :optional => true},
    QUERYPARALLELISM => {:type => ::Thrift::Types::I32, :name => 'queryParallelism', :optional => true},
    SCHEDULINGPOLICY => {:type => ::Thrift::Types::STRING, :name => 'schedulingPolicy', :optional => true}
  }

  def struct_fields; FIELDS; end

  # Enforces fields marked 'required' in the IDL.
  def validate
    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field resourcePlanName is unset!') unless @resourcePlanName
    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field poolName is unset!') unless @poolName
  end

  ::Thrift::Struct.generate_accessors self
end

# A trigger attached to a pool of a resource plan.
class WMTrigger
  include ::Thrift::Struct, ::Thrift::Struct_Union
  RESOURCEPLANNAME = 1
  POOLNAME = 2
  TRIGGEREXPRESSION = 3
  ACTIONEXPRESSION = 4

  FIELDS = {
    RESOURCEPLANNAME => {:type => ::Thrift::Types::STRING, :name => 'resourcePlanName'},
    POOLNAME => {:type => ::Thrift::Types::STRING, :name => 'poolName'},
    TRIGGEREXPRESSION => {:type => ::Thrift::Types::STRING, :name => 'triggerExpression', :optional => true},
    ACTIONEXPRESSION => {:type => ::Thrift::Types::STRING, :name => 'actionExpression', :optional => true}
  }

  def struct_fields; FIELDS; end

  # Enforces fields marked 'required' in the IDL.
  def validate
    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field resourcePlanName is unset!') unless @resourcePlanName
    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field poolName is unset!') unless @poolName
  end

  ::Thrift::Struct.generate_accessors self
end

# Maps an entity (entityType + entityName) to a pool of a resource plan.
class WMMapping
  include ::Thrift::Struct, ::Thrift::Struct_Union
  RESOURCEPLANNAME = 1
  ENTITYTYPE = 2
  ENTITYNAME = 3
  POOLNAME = 4
  ORDERING = 5

  FIELDS = {
    RESOURCEPLANNAME => {:type => ::Thrift::Types::STRING, :name => 'resourcePlanName'},
    ENTITYTYPE => {:type => ::Thrift::Types::STRING, :name => 'entityType'},
    ENTITYNAME => {:type => ::Thrift::Types::STRING, :name => 'entityName'},
    POOLNAME => {:type => ::Thrift::Types::STRING, :name => 'poolName', :optional => true},
    ORDERING => {:type => ::Thrift::Types::I32, :name => 'ordering', :optional => true}
  }

  def struct_fields; FIELDS; end

  # Enforces fields marked 'required' in the IDL.
  def validate
    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field resourcePlanName is unset!') unless @resourcePlanName
    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field entityType is unset!') unless @entityType
    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field entityName is unset!') unless @entityName
  end

  ::Thrift::Struct.generate_accessors self
end
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.model; + +public class MWMMapping { + private MWMResourcePlan resourcePlan; + private EntityType entityType; + private String entityName; + private MWMPool pool; + private Integer ordering; + + public enum EntityType { + USER, + GROUP + } + + public MWMMapping(MWMResourcePlan resourcePlan, EntityType entityType, String entityName, + MWMPool pool, Integer ordering) { + this.resourcePlan = resourcePlan; + this.entityType = entityType; + this.entityName = entityName; + this.pool = pool; + this.ordering = ordering; + } + + public MWMResourcePlan getResourcePlan() { + return resourcePlan; + } + + public void setResourcePlan(MWMResourcePlan resourcePlan) { + this.resourcePlan = resourcePlan; + } + + public EntityType getEntityType() { + return entityType; + } + + public void setEntityType(EntityType entityType) { + this.entityType = entityType; + } + + public String getEntityName() { + return entityName; + } + + public void setEntityName(String entityName) { + this.entityName = entityName; + } + + public MWMPool getPool() { + return pool; + } + + public void setPool(MWMPool pool) { + this.pool = pool; + } + + public Integer getOrdering() { + return ordering; + } + + public void setOrdering(Integer ordering) { + this.ordering = ordering; 
+ } + +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java new file mode 100644 index 0000000000..c7cd983709 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.metastore.model;

import java.util.Set;

/**
 * Storage (JDO model) class for a workload-management pool. A pool belongs to
 * a resource plan, is identified by its {@code path} within that plan, and may
 * have a parent pool (tree structure) plus a set of attached triggers.
 * Numeric fields use wrapper types because the corresponding columns are
 * nullable.
 */
public class MWMPool {
  private MWMResourcePlan resourcePlan;
  private String path;
  private MWMPool parentPool;
  private Double allocFraction;
  private Integer queryParallelism;
  // Parameterized (was raw Set) so the element type is checked at compile time.
  private Set<MWMTrigger> triggers;

  public MWMPool() {}

  public MWMPool(MWMResourcePlan resourcePlan, String path, MWMPool parentPool,
      Double allocFraction, Integer queryParallelism) {
    this.resourcePlan = resourcePlan;
    this.path = path;
    this.parentPool = parentPool;
    this.allocFraction = allocFraction;
    this.queryParallelism = queryParallelism;
  }

  public MWMResourcePlan getResourcePlan() {
    return resourcePlan;
  }

  public void setResourcePlan(MWMResourcePlan resourcePlan) {
    this.resourcePlan = resourcePlan;
  }

  public String getPath() {
    return path;
  }

  public void setPath(String path) {
    this.path = path;
  }

  public MWMPool getParentPool() {
    return parentPool;
  }

  public void setParentPool(MWMPool parentPool) {
    this.parentPool = parentPool;
  }

  public Double getAllocFraction() {
    return allocFraction;
  }

  public void setAllocFraction(Double allocFraction) {
    this.allocFraction = allocFraction;
  }

  public Integer getQueryParallelism() {
    return queryParallelism;
  }

  public void setQueryParallelism(Integer queryParallelism) {
    this.queryParallelism = queryParallelism;
  }

  public Set<MWMTrigger> getTriggers() {
    return triggers;
  }

  public void setTriggers(Set<MWMTrigger> triggers) {
    this.triggers = triggers;
  }
}
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.metastore.model;

import java.util.List;

/**
 * Storage class for ResourcePlan. A resource plan owns its pools, triggers and
 * entity mappings; {@code queryParallelism} is nullable (optional column).
 */
public class MWMResourcePlan {
  private String name;
  private Integer queryParallelism;
  private Status status;
  // Parameterized (were raw List) so element types are checked at compile time.
  private List<MWMPool> pools;
  private List<MWMTrigger> triggers;
  private List<MWMMapping> mappings;

  /** Lifecycle state of a resource plan. */
  public enum Status {
    ACTIVE,
    ENABLED,
    DISABLED
  }

  public MWMResourcePlan() {}

  public MWMResourcePlan(String name, Integer queryParallelism, Status status) {
    this.name = name;
    this.queryParallelism = queryParallelism;
    this.status = status;
  }

  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public Integer getQueryParallelism() {
    return queryParallelism;
  }

  public void setQueryParallelism(Integer queryParallelism) {
    this.queryParallelism = queryParallelism;
  }

  public Status getStatus() {
    return status;
  }

  public void setStatus(Status status) {
    this.status = status;
  }

  public List<MWMPool> getPools() {
    return pools;
  }

  public void setPools(List<MWMPool> pools) {
    this.pools = pools;
  }

  public List<MWMTrigger> getTriggers() {
    return triggers;
  }

  public void setTriggers(List<MWMTrigger> triggers) {
    this.triggers = triggers;
  }

  public List<MWMMapping> getMappings() {
    return mappings;
  }

  public void setMappings(List<MWMMapping> mappings) {
    this.mappings = mappings;
  }
}
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.metastore.model;

import java.util.Set;

/**
 * Storage (JDO model) class for a workload-management trigger. A trigger
 * belongs to a resource plan, holds trigger/action expressions, and may be
 * attached to several pools (many-to-many with {@link MWMPool}).
 */
public class MWMTrigger {
  private MWMResourcePlan resourcePlan;
  private String name;
  private String triggerExpression;
  private String actionExpression;
  // Parameterized (was raw Set) so the element type is checked at compile time.
  private Set<MWMPool> pools;

  /** No-arg constructor, consistent with the other MWM* model classes. */
  public MWMTrigger() {}

  public MWMTrigger(MWMResourcePlan resourcePlan, String name,
      String triggerExpression, String actionExpression, Set<MWMPool> pools) {
    this.resourcePlan = resourcePlan;
    this.name = name;
    this.triggerExpression = triggerExpression;
    this.actionExpression = actionExpression;
    this.pools = pools;
  }

  public MWMResourcePlan getResourcePlan() {
    return resourcePlan;
  }

  public void setResourcePlan(MWMResourcePlan resourcePlan) {
    this.resourcePlan = resourcePlan;
  }

  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public String getTriggerExpression() {
    return triggerExpression;
  }

  public void setTriggerExpression(String triggerExpression) {
    this.triggerExpression = triggerExpression;
  }

  public String getActionExpression() {
    return actionExpression;
  }

  public void setActionExpression(String actionExpression) {
    this.actionExpression = actionExpression;
  }

  public Set<MWMPool> getPools() {
    return pools;
  }

  public void setPools(Set<MWMPool> pools) {
    this.pools = pools;
  }
}
// Workload management (WM) API structs. These are the source of truth for the
// generated Python/Ruby/Java bindings — edit here and regenerate.

/** A named workload-management resource plan. */
struct WMResourcePlan {
  1: required string name;
  2: optional string status;
  3: optional i32 queryParallelism;
}

/** A pool inside a resource plan, identified by (resourcePlanName, poolName). */
struct WMPool {
  1: required string resourcePlanName;
  2: required string poolName;
  3: optional string parentPoolName;
  4: optional double allocFraction;
  5: optional i32 queryParallelism;
  6: optional string schedulingPolicy;
}

/** A trigger attached to a pool of a resource plan. */
struct WMTrigger {
  1: required string resourcePlanName;
  2: required string poolName;
  3: optional string triggerExpression;
  4: optional string actionExpression;
}

/** Maps an entity (entityType + entityName) to a pool of a resource plan. */
struct WMMapping {
  1: required string resourcePlanName;
  2: required string entityType;
  3: required string entityName;
  4: optional string poolName;
  5: optional i32 ordering;
}

exception MetaException {
  1: string message
}