diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java index 7196756da6..3ed4c1489d 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java @@ -74,6 +74,8 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -982,7 +984,7 @@ public String getMetastoreDbUuid() throws MetaException { @Override public void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize) - throws AlreadyExistsException, MetaException { + throws AlreadyExistsException, InvalidObjectException, MetaException { objectStore.createResourcePlan(resourcePlan, defaultPoolSize); } @@ -1043,4 +1045,43 @@ public void dropWMTrigger(String resourcePlanName, String triggerName) throws NoSuchObjectException, MetaException { return objectStore.getTriggersForResourcePlan(resourcePlanName); } + + @Override + public void createOrUpdatePool(WMPool pool, String newPoolPath, boolean update) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, + MetaException { + objectStore.createOrUpdatePool(pool, newPoolPath, update); + } + + @Override + public void dropWMPool(String resourcePlanName, String poolPath) + throws NoSuchObjectException, InvalidOperationException, MetaException { + objectStore.dropWMPool(resourcePlanName, poolPath); + } + + @Override + public void createOrUpdateWMMapping(WMMapping mapping, boolean update) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, + MetaException { + objectStore.createOrUpdateWMMapping(mapping, update); + } + + @Override + public void dropWMMapping(WMMapping mapping) + throws NoSuchObjectException, InvalidOperationException, MetaException { + objectStore.dropWMMapping(mapping); + } + + @Override + public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName, + String poolPath) throws AlreadyExistsException, NoSuchObjectException, + InvalidOperationException, MetaException { + objectStore.createWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath); + } + + @Override + public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName, + String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException { + objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath); + } } diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java index 0506f67621..c793916f2d 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; +import 
org.apache.hadoop.hive.metastore.api.WMMapping; import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.api.WMPoolTrigger; import org.apache.hadoop.hive.metastore.api.WMResourcePlan; @@ -77,6 +78,8 @@ protected void setupTriggers(final List triggers) throws Exception { WMPool pool = new WMPool("rp", "llap"); pool.setAllocFraction(1.0f); pool.setQueryParallelism(1); + WMMapping mapping = new WMMapping("rp", "DEFAULT", ""); + mapping.setPoolPath("llap"); WMFullResourcePlan rp = new WMFullResourcePlan( new WMResourcePlan("rp"), Lists.newArrayList(pool)); rp.getPlan().setDefaultPoolPath("llap"); @@ -86,4 +89,4 @@ protected void setupTriggers(final List triggers) throws Exception { } wm.updateResourcePlanAsync(rp).get(10, TimeUnit.SECONDS); } -} \ No newline at end of file +} diff --git metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql index 9f12153d91..8eb197c82b 100644 --- metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql +++ metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql @@ -2,11 +2,10 @@ CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NAME VARCHAR(128) N CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" ("NAME"); ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_PK" PRIMARY KEY ("RP_ID"); -CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, PARENT_POOL_ID BIGINT, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024)); +CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024)); CREATE UNIQUE INDEX "APP"."UNIQUE_WM_POOL" ON "APP"."WM_POOL" ("RP_ID", "PATH"); ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID"); ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; -ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024)); diff --git metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql index 1d21fa2f47..eff6f76e06 100644 --- metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql +++ metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql @@ -112,7 +112,7 @@ CREATE TABLE "APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY" VARCHAR(255) NOT NU CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL, DEFAULT_POOL_ID BIGINT); -CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, PARENT_POOL_ID BIGINT, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024)); +CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, ALLOC_FRACTION 
DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024)); CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024)); @@ -356,8 +356,6 @@ ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID"); ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; -ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; - ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_PK" PRIMARY KEY ("TRIGGER_ID"); diff --git metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql index 68d8d37e61..18bd52557d 100644 --- metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql +++ metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql @@ -989,6 +989,75 @@ ON t.RP_ID = r.RP_ID" ); +CREATE TABLE IF NOT EXISTS `WM_POOLS` ( + `RP_NAME` string, + `PATH` string, + `ALLOC_FRACTION` double, + `QUERY_PARALLELISM` int, + `SCHEDULING_POLICY` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + WM_RESOURCEPLAN.NAME, + WM_POOL.PATH, + WM_POOL.ALLOC_FRACTION, + WM_POOL.QUERY_PARALLELISM, + WM_POOL.SCHEDULING_POLICY +FROM + WM_POOL +JOIN + WM_RESOURCEPLAN +ON + WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID" +); + +CREATE TABLE IF NOT EXISTS `WM_TRIGGERS_TO_POOLS` ( + `RP_NAME` string, + `TRIGGER_NAME` string, + `POOL_PATH` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + WM_RESOURCEPLAN.NAME RP_NAME, + WM_TRIGGER.NAME TRIGGER_NAME, + WM_POOL.PATH POOL_PATH +FROM + WM_POOL_TO_TRIGGER +JOIN WM_POOL ON WM_POOL_TO_TRIGGER.POOL_ID = WM_POOL.POOL_ID +JOIN WM_TRIGGER ON WM_POOL_TO_TRIGGER.TRIGGER_ID = WM_TRIGGER.TRIGGER_ID +JOIN WM_RESOURCEPLAN ON WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID" +); + +CREATE TABLE IF NOT EXISTS `WM_MAPPINGS` ( + `RP_NAME` string, + `ENTITY_TYPE` string, + `ENTITY_NAME` string, + `POOL_PATH` string, + `ORDERING` int +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + WM_RESOURCEPLAN.NAME, + ENTITY_TYPE, + ENTITY_NAME, + WM_POOL.PATH, + ORDERING +FROM + WM_MAPPING +JOIN WM_RESOURCEPLAN ON WM_MAPPING.RP_ID = WM_RESOURCEPLAN.RP_ID +JOIN WM_POOL ON WM_POOL.POOL_ID = WM_MAPPING.POOL_ID" +); + + DROP DATABASE IF EXISTS INFORMATION_SCHEMA; CREATE DATABASE INFORMATION_SCHEMA; diff --git metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql index 06d82e0e35..945bda4fa1 100644 --- metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql +++ metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql @@ -17,7 +17,6 @@ CREATE TABLE WM_POOL POOL_ID bigint NOT NULL, RP_ID bigint NOT NULL, PATH nvarchar(1024) NOT NULL, - PARENT_POOL_ID bigint, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM int, SCHEDULING_POLICY nvarchar(1024) @@ -26,8 +25,9 @@ CREATE 
TABLE WM_POOL ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID); CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, "NAME"); + ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); -ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID); + ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID); CREATE TABLE WM_TRIGGER diff --git metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql index b189c31ff6..837b67b142 100644 --- metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql +++ metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql @@ -613,7 +613,6 @@ CREATE TABLE WM_POOL POOL_ID bigint NOT NULL, RP_ID bigint NOT NULL, PATH nvarchar(1024) NOT NULL, - PARENT_POOL_ID bigint, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM int, SCHEDULING_POLICY nvarchar(1024) @@ -935,8 +934,6 @@ CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); -ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); - -- ----------------------------------------------------------------------------------------------------------------------------------------------- -- Transaction and Lock Tables diff --git metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql index cff0b85fbd..293c4d8038 100644 --- metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql +++ metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql @@ -13,14 +13,12 @@ CREATE TABLE IF NOT EXISTS WM_POOL `POOL_ID` bigint(20) NOT NULL, `RP_ID` bigint(20) NOT NULL, `PATH` varchar(767) NOT NULL, - `PARENT_POOL_ID` bigint(20), `ALLOC_FRACTION` DOUBLE, `QUERY_PARALLELISM` int(11), `SCHEDULING_POLICY` varchar(767), PRIMARY KEY (`POOL_ID`), KEY `UNIQUE_WM_POOL` (`RP_ID`, `PATH`), CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`), - CONSTRAINT `WM_POOL_FK2` FOREIGN KEY (`PARENT_POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; ALTER TABLE `WM_RESOURCEPLAN` ADD CONSTRAINT `WM_RESOURCEPLAN_FK1` FOREIGN KEY (`DEFAULT_POOL_ID`) REFERENCES `WM_POOL`(`POOL_ID`); diff --git metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql index 702a929582..f7a7868a2f 100644 --- metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql +++ metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql @@ -864,14 +864,12 @@ CREATE TABLE IF NOT EXISTS WM_POOL `POOL_ID` bigint(20) NOT NULL, `RP_ID` bigint(20) NOT NULL, `PATH` varchar(767) NOT NULL, - `PARENT_POOL_ID` bigint(20), `ALLOC_FRACTION` DOUBLE, `QUERY_PARALLELISM` int(11), `SCHEDULING_POLICY` varchar(767), PRIMARY KEY (`POOL_ID`), KEY `UNIQUE_WM_POOL` (`RP_ID`, `PATH`), CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`), - CONSTRAINT `WM_POOL_FK2` FOREIGN KEY (`PARENT_POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; ALTER TABLE `WM_RESOURCEPLAN` ADD CONSTRAINT `WM_RESOURCEPLAN_FK1` FOREIGN KEY (`DEFAULT_POOL_ID`) REFERENCES `WM_POOL`(`POOL_ID`); diff --git metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql 
metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql index ceab459d31..596bb60024 100644 --- metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql +++ metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql @@ -17,7 +17,6 @@ CREATE TABLE WM_POOL POOL_ID bigint NOT NULL, RP_ID bigint NOT NULL, PATH nvarchar(1024) NOT NULL, - PARENT_POOL_ID bigint, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM int, SCHEDULING_POLICY nvarchar(1024) @@ -27,7 +26,6 @@ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID); CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, "NAME"); ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); -ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID); ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID); diff --git metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql index 6e9c7ff5d1..79add9bcb5 100644 --- metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql +++ metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql @@ -594,7 +594,6 @@ CREATE TABLE WM_POOL POOL_ID bigint NOT NULL, RP_ID bigint NOT NULL, PATH nvarchar(1024) NOT NULL, - PARENT_POOL_ID bigint, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM int, SCHEDULING_POLICY nvarchar(1024) @@ -878,8 +877,6 @@ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFA ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); -ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID); - CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME"); ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); diff --git metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql index 07fb6b7ac2..bd588c40c6 100644 --- metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql +++ metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql @@ -17,7 +17,6 @@ CREATE TABLE "WM_POOL" ( "POOL_ID" bigint NOT NULL, "RP_ID" bigint NOT NULL, "PATH" character varying(1024) NOT NULL, - "PARENT_POOL_ID" bigint, "ALLOC_FRACTION" DOUBLE, "QUERY_PARALLELISM" integer, "SCHEDULING_POLICY" character varying(1024) @@ -31,8 +30,6 @@ ALTER TABLE ONLY "WM_POOL" ALTER TABLE ONLY "WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; -ALTER TABLE ONLY "WM_POOL" - ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; ALTER TABLE ONLY "WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; diff --git metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql index 750460475b..72e81c5c03 100644 --- metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql +++ metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql @@ -631,7 +631,6 @@ CREATE TABLE "WM_POOL" ( "POOL_ID" bigint NOT NULL, "RP_ID" bigint NOT NULL, "PATH" character varying(1024) NOT NULL, - "PARENT_POOL_ID" bigint, "ALLOC_FRACTION" DOUBLE, "QUERY_PARALLELISM" integer, "SCHEDULING_POLICY" character varying(1024) diff 
--git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index ed0f4d8ad7..119ed819d5 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -7553,7 +7553,7 @@ public WMDropTriggerResponse drop_wm_trigger(WMDropTriggerRequest request) getMS().dropWMTrigger(request.getResourcePlanName(), request.getTriggerName()); return new WMDropTriggerResponse(); } catch (MetaException e) { - LOG.error("Exception while trying to retrieve resource plans", e); + LOG.error("Exception while trying to drop trigger.", e); throw e; } } @@ -7572,6 +7572,75 @@ public WMGetTriggersForResourePlanResponse get_triggers_for_resourceplan( throw e; } } + + @Override + public WMCreateOrUpdatePoolResponse create_or_update_wm_pool( + WMCreateOrUpdatePoolRequest request) throws AlreadyExistsException, NoSuchObjectException, + InvalidObjectException, MetaException, TException { + try { + getMS().createOrUpdatePool(request.getPool(), request.getNewPoolPath(), request.isUpdate()); + return new WMCreateOrUpdatePoolResponse(); + } catch (MetaException e) { + LOG.error("Exception while trying to create or update WMPool", e); + throw e; + } + } + + @Override + public WMDropPoolResponse drop_wm_pool(WMDropPoolRequest request) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + try { + getMS().dropWMPool(request.getResourcePlanName(), request.getPoolPath()); + return new WMDropPoolResponse(); + } catch (MetaException e) { + LOG.error("Exception while trying to drop WMPool", e); + throw e; + } + } + + @Override + public WMCreateOrUpdateMappingResponse create_or_update_wm_mapping( + WMCreateOrUpdateMappingRequest request) throws AlreadyExistsException, + NoSuchObjectException, InvalidObjectException, MetaException, TException { + try { + getMS().createOrUpdateWMMapping(request.getMapping(), request.isUpdate()); + return new WMCreateOrUpdateMappingResponse(); + } catch (MetaException e) { + LOG.error("Exception while trying to create or update WMMapping", e); + throw e; + } + } + + @Override + public WMDropMappingResponse drop_wm_mapping(WMDropMappingRequest request) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + try { + getMS().dropWMMapping(request.getMapping()); + return new WMDropMappingResponse(); + } catch (MetaException e) { + LOG.error("Exception while trying to drop WMMapping", e); + throw e; + } + } + + @Override + public WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping( + WMCreateOrDropTriggerToPoolMappingRequest request) throws AlreadyExistsException, + NoSuchObjectException, InvalidObjectException, MetaException, TException { + try { + if (request.isDrop()) { + getMS().dropWMTriggerToPoolMapping( + request.getResourcePlanName(), request.getTriggerName(), request.getPoolPath()); + } else { + getMS().createWMTriggerToPoolMapping( + request.getResourcePlanName(), request.getTriggerName(), request.getPoolPath()); + } + return new WMCreateOrDropTriggerToPoolMappingResponse(); + } catch (MetaException e) { + LOG.error("Exception while trying to create or drop WMTriggerToPoolMapping", e); + throw e; + } + } } public static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, HiveConf hiveConf) diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 
b5a9b79045..a75838cd8a 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -2724,4 +2724,52 @@ public void dropWMTrigger(String resourcePlanName, String triggerName) request.setResourcePlanName(resourcePlan); return client.get_triggers_for_resourceplan(request).getTriggers(); } + + @Override + public void createOrUpdateWMPool(WMPool pool, String newPoolPath, boolean isUpdate) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + WMCreateOrUpdatePoolRequest request = new WMCreateOrUpdatePoolRequest(); + request.setPool(pool); + request.setNewPoolPath(newPoolPath); + request.setUpdate(isUpdate); + client.create_or_update_wm_pool(request); + } + + @Override + public void dropWMPool(String resourcePlanName, String poolPath) + throws NoSuchObjectException, MetaException, TException { + WMDropPoolRequest request = new WMDropPoolRequest(); + request.setResourcePlanName(resourcePlanName); + request.setPoolPath(poolPath); + client.drop_wm_pool(request); + } + + @Override + public void createOrUpdateWMMapping(WMMapping mapping, boolean isUpdate) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + WMCreateOrUpdateMappingRequest request = new WMCreateOrUpdateMappingRequest(); + request.setMapping(mapping); + request.setUpdate(isUpdate); + client.create_or_update_wm_mapping(request); + } + + @Override + public void dropWMMapping(WMMapping mapping) + throws NoSuchObjectException, MetaException, TException { + WMDropMappingRequest request = new WMDropMappingRequest(); + request.setMapping(mapping); + client.drop_wm_mapping(request); + } + + @Override + public void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName, + String poolPath, boolean shouldDrop) throws AlreadyExistsException, NoSuchObjectException, + InvalidObjectException, MetaException, TException { + WMCreateOrDropTriggerToPoolMappingRequest request = new WMCreateOrDropTriggerToPoolMappingRequest(); + request.setResourcePlanName(resourcePlanName); + request.setTriggerName(triggerName); + request.setPoolPath(poolPath); + request.setDrop(shouldDrop); + client.create_or_drop_wm_trigger_to_pool_mapping(request); + } } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 2cb255eb02..65d440bf08 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -107,6 +107,8 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -1802,4 +1804,20 @@ void dropWMTrigger(String resourcePlanName, String triggerName) List getTriggersForResourcePlan(String resourcePlan) throws NoSuchObjectException, MetaException, TException; + + void createOrUpdateWMPool(WMPool pool, String newPoolPath, boolean isUpdate) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException; + + void dropWMPool(String resourcePlanName, String poolPath) + throws 
NoSuchObjectException, MetaException, TException; + + void createOrUpdateWMMapping(WMMapping mapping, boolean isUpdate) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException; + + void dropWMMapping(WMMapping mapping) + throws NoSuchObjectException, MetaException, TException; + + void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName, + String poolPath, boolean shouldDrop) throws AlreadyExistsException, NoSuchObjectException, + InvalidObjectException, MetaException, TException; } diff --git metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index 95aeb2588f..fbf18e0a66 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -70,6 +70,8 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -942,7 +944,7 @@ public String getMetastoreDbUuid() throws MetaException { @Override public void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize) - throws AlreadyExistsException, MetaException { + throws AlreadyExistsException, InvalidObjectException, MetaException { objectStore.createResourcePlan(resourcePlan, defaultPoolSize); } @@ -1004,4 +1006,43 @@ public void dropWMTrigger(String resourcePlanName, String triggerName) throws NoSuchObjectException, MetaException { return objectStore.getTriggersForResourcePlan(resourcePlanName); } + + @Override + public void createOrUpdatePool(WMPool pool, String newPoolPath, boolean update) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, + MetaException { + objectStore.createOrUpdatePool(pool, newPoolPath, update); + } + + @Override + public void dropWMPool(String resourcePlanName, String poolPath) + throws NoSuchObjectException, InvalidOperationException, MetaException { + objectStore.dropWMPool(resourcePlanName, poolPath); + } + + @Override + public void createOrUpdateWMMapping(WMMapping mapping, boolean update) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, + MetaException { + objectStore.createOrUpdateWMMapping(mapping, update); + } + + @Override + public void dropWMMapping(WMMapping mapping) + throws NoSuchObjectException, InvalidOperationException, MetaException { + objectStore.dropWMMapping(mapping); + } + + @Override + public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName, + String poolPath) throws AlreadyExistsException, NoSuchObjectException, + InvalidOperationException, MetaException { + objectStore.createWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath); + } + + @Override + public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName, + String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException { + objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath); + } } diff --git metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java 
metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index e8400be411..f1aa12c979 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; @@ -70,6 +71,8 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -1008,4 +1011,37 @@ public void dropWMTrigger(String resourcePlanName, String triggerName) throws NoSuchObjectException, MetaException { return null; } + + @Override + public void createOrUpdatePool(WMPool pool, String newPoolPath, boolean update) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, + MetaException { + } + + @Override + public void dropWMPool(String resourcePlanName, String poolPath) + throws NoSuchObjectException, InvalidOperationException, MetaException { + } + + @Override + public void createOrUpdateWMMapping(WMMapping mapping, boolean update) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, + MetaException { + } + + @Override + public void dropWMMapping(WMMapping mapping) + throws NoSuchObjectException, InvalidOperationException, MetaException { + } + + @Override + public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName, + String poolPath) throws AlreadyExistsException, NoSuchObjectException, + InvalidOperationException, MetaException { + } + + @Override + public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName, + String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException { + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 0a34633fa4..8d2888fd66 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -23,12 +23,9 @@ import java.util.concurrent.ExecutionException; -import com.google.common.util.concurrent.FutureCallback; - import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; -import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import java.io.BufferedWriter; import java.io.DataOutputStream; @@ -109,6 +106,8 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.TxnInfo; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import 
org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus; import org.apache.hadoop.hive.metastore.api.WMTrigger; @@ -183,6 +182,7 @@ import org.apache.hadoop.hive.ql.plan.ColStatistics; import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; +import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc; import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc; @@ -196,6 +196,8 @@ import org.apache.hadoop.hive.ql.plan.DropIndexDesc; import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc; +import org.apache.hadoop.hive.ql.plan.DropWMPoolDesc; import org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.FileMergeDesc; import org.apache.hadoop.hive.ql.plan.GrantDesc; @@ -239,6 +241,8 @@ import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; +import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc; +import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.security.authorization.AuthorizationUtils; import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationTranslator; @@ -649,6 +653,26 @@ public int execute(DriverContext driverContext) { if (work.getDropWMTriggerDesc() != null) { return dropWMTrigger(db, work.getDropWMTriggerDesc()); } + + if (work.getWmPoolDesc() != null) { + return createOrAlterWMPool(db, work.getWmPoolDesc()); + } + + if (work.getDropWMPoolDesc() != null) { + return dropWMPool(db, work.getDropWMPoolDesc()); + } + + if (work.getWmMappingDesc() != null) { + return createOrAlterWMMapping(db, work.getWmMappingDesc()); + } + + if (work.getDropWMMappingDesc() != null) { + return dropWMMapping(db, work.getDropWMMappingDesc()); + } + + if (work.getTriggerToPoolMappingDesc() != null) { + return createOrDropTriggerToPoolMapping(db, work.getTriggerToPoolMappingDesc()); + } } catch (Throwable e) { failed(e); return 1; @@ -779,6 +803,52 @@ private int dropWMTrigger(Hive db, DropWMTriggerDesc desc) throws HiveException return 0; } + private int createOrAlterWMPool(Hive db, CreateOrAlterWMPoolDesc desc) throws HiveException { + WMPool pool = new WMPool(desc.getResourcePlanName(), desc.getPoolPath()); + if (desc.getAllocFraction() != null) { + pool.setAllocFraction(desc.getAllocFraction()); + } + + if (desc.getQueryParallelism() != null) { + pool.setQueryParallelism(desc.getQueryParallelism()); + } + + if (desc.getSchedulingPolicy() != null) { + pool.setSchedulingPolicy(desc.getSchedulingPolicy()); + } + db.createOrUpdateWMPool(pool, desc.getNewPoolPath(), desc.isUpdate()); + return 0; + } + + private int dropWMPool(Hive db, DropWMPoolDesc desc) throws HiveException { + db.dropWMPool(desc.getResourcePlanName(), desc.getPoolPath()); + return 0; + } + + private int createOrAlterWMMapping(Hive db, CreateOrAlterWMMappingDesc desc) throws HiveException { + WMMapping mapping = + new WMMapping(desc.getResourcePlanName(), desc.getEntityType(), desc.getEntityName()); + mapping.setPoolPath(desc.getPoolPath()); + if (desc.getOrdering() != null) { + mapping.setOrdering(desc.getOrdering()); + } + db.createOrUpdateWMMapping(mapping, 
desc.isUpdate()); + return 0; + } + + private int dropWMMapping(Hive db, DropWMMappingDesc desc) throws HiveException { + db.dropWMMapping(new WMMapping( + desc.getResourcePlanName(), desc.getEntityType(), desc.getEntityName())); + return 0; + } + + private int createOrDropTriggerToPoolMapping(Hive db, CreateOrDropTriggerToPoolMappingDesc desc) + throws HiveException { + db.createOrDropTriggerToPoolMapping(desc.getResourcePlanName(), desc.getTriggerName(), + desc.getPoolPath(), desc.shouldDrop()); + return 0; + } + private int preInsertWork(Hive db, PreInsertTableDesc preInsertTableDesc) throws HiveException { try{ HiveMetaHook hook = preInsertTableDesc.getTable().getStorageHandler().getMetaHook(); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java index 851245c154..ced3ad4f36 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java @@ -99,7 +99,7 @@ public UserPoolMapping(List mappings, String defaultPoolPath) { } private static void addMapping(WMMapping mapping, Map map, String text) { - Mapping val = new Mapping(mapping.getPoolName(), mapping.getOrdering()); + Mapping val = new Mapping(mapping.getPoolPath(), mapping.getOrdering()); Mapping oldValue = map.put(mapping.getEntityName(), val); if (oldValue != null) { throw new AssertionError("Duplicate mapping for " + text + " " + mapping.getEntityName() @@ -120,4 +120,4 @@ public String mapSessionToPoolName(MappingInput input) { if (mapping != null) return mapping.fullPoolName; return defaultPoolPath; } -} \ No newline at end of file +} diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 3e9fff195f..46f538f9c3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -134,6 +134,8 @@ import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator; @@ -4812,4 +4814,48 @@ public void dropWMTrigger(String rpName, String triggerName) throws HiveExceptio throw new HiveException(e); } } + + public void createOrUpdateWMPool(WMPool pool, String newPoolPath, boolean isUpdate) + throws HiveException { + try { + getMSC().createOrUpdateWMPool(pool, newPoolPath, isUpdate); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public void dropWMPool(String resourcePlanName, String poolPath) throws HiveException { + try { + getMSC().dropWMPool(resourcePlanName, poolPath); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public void createOrUpdateWMMapping(WMMapping mapping, boolean isUpdate) + throws HiveException { + try { + getMSC().createOrUpdateWMMapping(mapping, isUpdate); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public void dropWMMapping(WMMapping mapping) throws HiveException { + try { + getMSC().dropWMMapping(mapping); + } catch (Exception e) { + throw new HiveException(e); + } + } + + + public void createOrDropTriggerToPoolMapping(String 
resourcePlanName, String triggerName, + String poolPath, boolean shouldDrop) throws HiveException { + try { + getMSC().createOrDropTriggerToPoolMapping(resourcePlanName, triggerName, poolPath, shouldDrop); + } catch (Exception e) { + throw new HiveException(e); + } + } }; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 579f2df280..91058aecfe 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -99,6 +99,7 @@ import org.apache.hadoop.hive.ql.plan.StatsWork; import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; +import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc; import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.CreateWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; @@ -109,6 +110,8 @@ import org.apache.hadoop.hive.ql.plan.DropIndexDesc; import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc; +import org.apache.hadoop.hive.ql.plan.DropWMPoolDesc; import org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; @@ -147,6 +150,8 @@ import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; +import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc; +import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; import org.apache.hadoop.hive.serde.serdeConstants; @@ -571,6 +576,24 @@ public void analyzeInternal(ASTNode input) throws SemanticException { case HiveParser.TOK_DROP_TRIGGER: analyzeDropTrigger(ast); break; + case HiveParser.TOK_CREATE_POOL: + analyzeCreatePool(ast); + break; + case HiveParser.TOK_ALTER_POOL: + analyzeAlterPool(ast); + break; + case HiveParser.TOK_DROP_POOL: + analyzeDropPool(ast); + break; + case HiveParser.TOK_CREATE_MAPPING: + analyzeCreateMapping(ast, false); + break; + case HiveParser.TOK_ALTER_MAPPING: + analyzeCreateMapping(ast, true); + break; + case HiveParser.TOK_DROP_MAPPING: + analyzeDropMapping(ast); + break; default: throw new SemanticException("Unsupported command: " + ast); } @@ -944,6 +967,19 @@ private void analyzeAlterResourcePlan(ASTNode ast) throws SemanticException { } desc.setNewName(ast.getChild(++i).getText()); break; + case HiveParser.TOK_ADD_TRIGGER: + case HiveParser.TOK_DROP_TRIGGER: { + if (ast.getChildCount() != 4) { + throw new SemanticException( + "Invalid syntax for add or drop trigger in alter resource plan statement"); + } + boolean drop = ast.getChild(1).getType() == HiveParser.TOK_DROP_TRIGGER; + String triggerName = ast.getChild(2).getText(); + String poolPath = poolPath((ASTNode)ast.getChild(3)); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + new CreateOrDropTriggerToPoolMappingDesc(rpName, triggerName, poolPath, drop)), conf)); + return; + } default: throw new SemanticException( "Unexpected token in alter resource plan statement: " + child.getType()); @@ -1041,6 +1077,99 @@ private void 
analyzeDropTrigger(ASTNode ast) throws SemanticException { new DDLWork(getInputs(), getOutputs(), desc), conf)); } + private void analyzeCreatePool(ASTNode ast) throws SemanticException { + if (ast.getChildCount() != 5) { + throw new SemanticException("Invalid syntax for create pool."); + } + String rpName = ast.getChild(0).getText(); + String poolPath = poolPath((ASTNode)ast.getChild(1)); + Double allocFraction = Double.parseDouble(ast.getChild(2).getText()); + Integer parallelism = Integer.parseInt(ast.getChild(3).getText()); + String policy = ast.getChild(4).getText(); + CreateOrAlterWMPoolDesc desc = new CreateOrAlterWMPoolDesc(rpName, poolPath, allocFraction, + parallelism, policy, false, null); + rootTasks.add(TaskFactory.get( + new DDLWork(getInputs(), getOutputs(), desc), conf)); + } + + private void analyzeAlterPool(ASTNode ast) throws SemanticException { + if (ast.getChildCount() < 3) { + throw new SemanticException("Invalid syntax for alter pool."); + } + String rpName = ast.getChild(0).getText(); + String poolPath = poolPath((ASTNode)ast.getChild(1)); + String newPoolPath = null; + Double allocFraction = null; + Integer parallelism = null; + String policy = null; + + for (int i = 2; i < ast.getChildCount(); ++i) { + Tree child = ast.getChild(i); + switch (child.getType()) { + case HiveParser.TOK_ALLOC_FRACTION: + allocFraction = Double.parseDouble(child.getChild(0).getText()); + break; + case HiveParser.TOK_QUERY_PARALLELISM: + parallelism = Integer.parseInt(child.getChild(0).getText()); + break; + case HiveParser.TOK_SCHEDULING_POLICY: + policy = child.getChild(0).getText(); + break; + case HiveParser.TOK_PATH: + newPoolPath = poolPath((ASTNode)child.getChild(0)); + } + } + + CreateOrAlterWMPoolDesc desc = new CreateOrAlterWMPoolDesc(rpName, poolPath, allocFraction, parallelism, policy, + true, newPoolPath); + rootTasks.add(TaskFactory.get( + new DDLWork(getInputs(), getOutputs(), desc), conf)); + } + + private void analyzeDropPool(ASTNode ast) throws SemanticException { + if (ast.getChildCount() != 2) { + throw new SemanticException("Invalid syntax for drop pool."); + } + String rpName = ast.getChild(0).getText(); + String poolPath = poolPath((ASTNode)ast.getChild(1)); + + DropWMPoolDesc desc = new DropWMPoolDesc(rpName, poolPath); + rootTasks.add(TaskFactory.get( + new DDLWork(getInputs(), getOutputs(), desc), conf)); + } + + private void analyzeCreateMapping(ASTNode ast, boolean update) throws SemanticException { + if (ast.getChildCount() < 4) { + throw new SemanticException("Invalid syntax for create or alter mapping."); + } + String rpName = ast.getChild(0).getText(); + String entityType = ast.getChild(1).getText(); + String entityName = ast.getChild(2).getText(); + String poolPath = poolPath((ASTNode)ast.getChild(3)); + Integer ordering = null; + if (ast.getChildCount() == 5) { + ordering = Integer.valueOf(((ASTNode)ast.getChild(4)).getText()); + } + + CreateOrAlterWMMappingDesc desc = + new CreateOrAlterWMMappingDesc(rpName, entityType, entityName, poolPath, ordering, update); + rootTasks.add(TaskFactory.get( + new DDLWork(getInputs(), getOutputs(), desc), conf)); + } + + private void analyzeDropMapping(ASTNode ast) throws SemanticException { + if (ast.getChildCount() != 3) { + throw new SemanticException("Invalid syntax for drop mapping."); + } + String rpName = ast.getChild(0).getText(); + String entityType = ast.getChild(1).getText(); + String entityName = ast.getChild(2).getText(); + + DropWMMappingDesc desc = new DropWMMappingDesc(rpName, entityType, entityName); + 
rootTasks.add(TaskFactory.get( + new DDLWork(getInputs(), getOutputs(), desc), conf)); + } + private void analyzeCreateDatabase(ASTNode ast) throws SemanticException { String dbName = unescapeIdentifier(ast.getChild(0).getText()); boolean ifNotExists = false; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g index d61fce9303..1dcfe9dd28 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g @@ -362,6 +362,10 @@ KW_DEFAULT: 'DEFAULT'; KW_POOL: 'POOL'; KW_MOVE: 'MOVE'; KW_DO: 'DO'; +KW_ALLOC_FRACTION: 'ALLOC_FRACTION'; +KW_SCHEDULING_POLICY: 'SCHEDULING_POLICY'; +KW_PATH: 'PATH'; +KW_MAPPING: 'MAPPING'; // Operators // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work. diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 0bbd9be406..b0cd133ff5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -24,7 +24,7 @@ ASTLabelType=ASTNode; backtrack=false; k=3; } -import SelectClauseParser, FromClauseParser, IdentifiersParser; +import SelectClauseParser, FromClauseParser, IdentifiersParser, ResourcePlanParser; tokens { TOK_INSERT; @@ -418,6 +418,16 @@ TOK_CREATE_TRIGGER; TOK_ALTER_TRIGGER; TOK_DROP_TRIGGER; TOK_TRIGGER_EXPRESSION; +TOK_CREATE_POOL; +TOK_ALTER_POOL; +TOK_DROP_POOL; +TOK_ALLOC_FRACTION; +TOK_SCHEDULING_POLICY; +TOK_PATH; +TOK_CREATE_MAPPING; +TOK_ALTER_MAPPING; +TOK_DROP_MAPPING; +TOK_ADD_TRIGGER; } @@ -599,6 +609,10 @@ import org.apache.hadoop.hive.conf.HiveConf; xlateMap.put("KW_POOL", "POOL"); xlateMap.put("KW_MOVE", "MOVE"); xlateMap.put("KW_DO", "DO"); + xlateMap.put("KW_POOL", "POOL"); + xlateMap.put("KW_ALLOC_FRACTION", "ALLOC_FRACTION"); + xlateMap.put("KW_SCHEDULING_POLICY", "SCHEDULING_POLICY"); + xlateMap.put("KW_PATH", "PATH"); // Operators xlateMap.put("DOT", "."); @@ -937,12 +951,7 @@ ddlStatement | showCurrentRole | abortTransactionStatement | killQueryStatement - | createResourcePlanStatement - | alterResourcePlanStatement - | dropResourcePlanStatement - | createTriggerStatement - | alterTriggerStatement - | dropTriggerStatement + | resourcePlanDdlStatements ; ifExists @@ -996,129 +1005,6 @@ orReplace -> ^(TOK_ORREPLACE) ; -createResourcePlanStatement -@init { pushMsg("create resource plan statement", state); } -@after { popMsg(state); } - : KW_CREATE KW_RESOURCE KW_PLAN - name=identifier - (KW_WITH KW_QUERY_PARALLELISM parallelism=Number)? - -> ^(TOK_CREATERESOURCEPLAN $name $parallelism?) 
- ; - -alterRpSet -@init { pushMsg("alterRpSet", state); } -@after { popMsg(state); } - : ( - (KW_QUERY_PARALLELISM EQUAL parallelism=Number -> ^(TOK_QUERY_PARALLELISM $parallelism)) - | (KW_DEFAULT KW_POOL EQUAL poolName=StringLiteral -> ^(TOK_DEFAULT_POOL $poolName)) - ) - ; - -alterRpSetList -@init { pushMsg("alterRpSetList", state); } -@after { popMsg(state); } - : - alterRpSet (COMMA alterRpSet)* -> alterRpSet+ - ; - -activate : KW_ACTIVATE -> ^(TOK_ACTIVATE); -enable : KW_ENABLE -> ^(TOK_ENABLE); - -alterResourcePlanStatement -@init { pushMsg("alter resource plan statement", state); } -@after { popMsg(state); } - : KW_ALTER KW_RESOURCE KW_PLAN name=identifier ( - (KW_VALIDATE -> ^(TOK_ALTER_RP $name TOK_VALIDATE)) - | (KW_DISABLE -> ^(TOK_ALTER_RP $name TOK_DISABLE)) - | (KW_SET setList=alterRpSetList -> ^(TOK_ALTER_RP $name $setList)) - | (KW_RENAME KW_TO newName=identifier - -> ^(TOK_ALTER_RP $name TOK_RENAME $newName)) - | ((activate+ enable? | enable+ activate?) -> ^(TOK_ALTER_RP $name activate? enable?)) - ) - ; - -dropResourcePlanStatement -@init { pushMsg("drop resource plan statement", state); } -@after { popMsg(state); } - : KW_DROP KW_RESOURCE KW_PLAN name=identifier - -> ^(TOK_DROP_RP $name) - ; - -poolPath -@init { pushMsg("poolPath", state); } -@after { popMsg(state); } - : identifier^ (DOT identifier)* - ; - -triggerExpression -@init { pushMsg("triggerExpression", state); } -@after { popMsg(state); } - : triggerOrExpression -> ^(TOK_TRIGGER_EXPRESSION triggerOrExpression) - ; - -triggerOrExpression -@init { pushMsg("triggerOrExpression", state); } -@after { popMsg(state); } - : triggerAndExpression (KW_OR triggerAndExpression)* - ; - -triggerAndExpression -@init { pushMsg("triggerAndExpression", state); } -@after { popMsg(state); } - : triggerAtomExpression (KW_AND triggerAtomExpression)* - ; - -triggerAtomExpression -@init { pushMsg("triggerAtomExpression", state); } -@after { popMsg(state); } - : (identifier comparisionOperator triggerLiteral) - | (LPAREN triggerOrExpression RPAREN) - ; - -triggerLiteral -@init { pushMsg("triggerLiteral", state); } -@after { popMsg(state); } - : (Number (KW_HOUR|KW_MINUTE|KW_SECOND)?) - | ByteLengthLiteral - | StringLiteral - ; - -comparisionOperator -@init { pushMsg("comparisionOperator", state); } -@after { popMsg(state); } - : EQUAL | LESSTHAN | LESSTHANOREQUALTO | GREATERTHAN | GREATERTHANOREQUALTO - ; - -triggerActionExpression -@init { pushMsg("triggerActionExpression", state); } -@after { popMsg(state); } - : KW_KILL - | (KW_MOVE^ KW_TO! 
poolPath) - ; - -createTriggerStatement -@init { pushMsg("create trigger statement", state); } -@after { popMsg(state); } - : KW_CREATE KW_TRIGGER rpName=identifier DOT triggerName=identifier - KW_WHEN triggerExpression KW_DO triggerActionExpression - -> ^(TOK_CREATE_TRIGGER $rpName $triggerName triggerExpression triggerActionExpression) - ; - -alterTriggerStatement -@init { pushMsg("alter trigger statement", state); } -@after { popMsg(state); } - : KW_ALTER KW_TRIGGER rpName=identifier DOT triggerName=identifier - KW_WHEN triggerExpression KW_DO triggerActionExpression - -> ^(TOK_ALTER_TRIGGER $rpName $triggerName triggerExpression triggerActionExpression) - ; - -dropTriggerStatement -@init { pushMsg("drop trigger statement", state); } -@after { popMsg(state); } - : KW_DROP KW_TRIGGER rpName=identifier DOT triggerName=identifier - -> ^(TOK_DROP_TRIGGER $rpName $triggerName) - ; - createDatabaseStatement @init { pushMsg("create database statement", state); } @after { popMsg(state); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g index a0eca4b3b9..f1ca30109f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g @@ -830,8 +830,8 @@ nonReserved | KW_ZONE | KW_TIMESTAMPTZ | KW_DEFAULT - | KW_POOL - + | KW_RESOURCE | KW_PLAN | KW_PLANS | KW_QUERY_PARALLELISM | KW_ACTIVATE | KW_MOVE | KW_DO + | KW_POOL | KW_ALLOC_FRACTION | KW_SCHEDULING_POLICY | KW_PATH | KW_MAPPING ; //The following SQL2011 reserved keywords are used as function name only, but not as identifiers. diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g new file mode 100644 index 0000000000..bfa4e4f2da --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g @@ -0,0 +1,240 @@ +/** + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +parser grammar ResourcePlanParser; + +options +{ + output=AST; + ASTLabelType=ASTNode; + backtrack=false; + k=3; +} + +resourcePlanDdlStatements + : createResourcePlanStatement + | alterResourcePlanStatement + | dropResourcePlanStatement + | createTriggerStatement + | alterTriggerStatement + | dropTriggerStatement + | createPoolStatement + | alterPoolStatement + | dropPoolStatement + | createMappingStatement + | alterMappingStatement + | dropMappingStatement + ; + + +createResourcePlanStatement +@init { gParent.pushMsg("create resource plan statement", state); } +@after { gParent.popMsg(state); } + : KW_CREATE KW_RESOURCE KW_PLAN + name=identifier + (KW_WITH KW_QUERY_PARALLELISM parallelism=Number)? + -> ^(TOK_CREATERESOURCEPLAN $name $parallelism?) 
+ ; + +alterRpSet +@init { gParent.pushMsg("alterRpSet", state); } +@after { gParent.popMsg(state); } + : ( + (KW_QUERY_PARALLELISM EQUAL parallelism=Number -> ^(TOK_QUERY_PARALLELISM $parallelism)) + | (KW_DEFAULT KW_POOL EQUAL poolName=StringLiteral -> ^(TOK_DEFAULT_POOL $poolName)) + ) + ; + +alterRpSetList +@init { gParent.pushMsg("alterRpSetList", state); } +@after { gParent.popMsg(state); } + : + alterRpSet (COMMA alterRpSet)* -> alterRpSet+ + ; + +activate : KW_ACTIVATE -> ^(TOK_ACTIVATE); +enable : KW_ENABLE -> ^(TOK_ENABLE); + +alterResourcePlanStatement +@init { gParent.pushMsg("alter resource plan statement", state); } +@after { gParent.popMsg(state); } + : KW_ALTER KW_RESOURCE KW_PLAN name=identifier ( + (KW_VALIDATE -> ^(TOK_ALTER_RP $name TOK_VALIDATE)) + | (KW_DISABLE -> ^(TOK_ALTER_RP $name TOK_DISABLE)) + | (KW_SET setList=alterRpSetList -> ^(TOK_ALTER_RP $name $setList)) + | (KW_RENAME KW_TO newName=identifier + -> ^(TOK_ALTER_RP $name TOK_RENAME $newName)) + | ((activate+ enable? | enable+ activate?) -> ^(TOK_ALTER_RP $name activate? enable?)) + | (KW_ADD KW_TRIGGER triggerName=identifier KW_TO poolPath + -> ^(TOK_ALTER_RP $name TOK_ADD_TRIGGER $triggerName poolPath)) + | (KW_DROP KW_TRIGGER triggerName=identifier KW_FROM poolPath + -> ^(TOK_ALTER_RP $name TOK_DROP_TRIGGER $triggerName poolPath)) + ) + ; + +dropResourcePlanStatement +@init { gParent.pushMsg("drop resource plan statement", state); } +@after { gParent.popMsg(state); } + : KW_DROP KW_RESOURCE KW_PLAN name=identifier + -> ^(TOK_DROP_RP $name) + ; + +poolPath +@init { gParent.pushMsg("poolPath", state); } +@after { gParent.popMsg(state); } + : identifier^ (DOT identifier)* + ; + +triggerExpression +@init { gParent.pushMsg("triggerExpression", state); } +@after { gParent.popMsg(state); } + : triggerOrExpression -> ^(TOK_TRIGGER_EXPRESSION triggerOrExpression) + ; + +triggerOrExpression +@init { gParent.pushMsg("triggerOrExpression", state); } +@after { gParent.popMsg(state); } + : triggerAndExpression (KW_OR triggerAndExpression)* + ; + +triggerAndExpression +@init { gParent.pushMsg("triggerAndExpression", state); } +@after { gParent.popMsg(state); } + : triggerAtomExpression (KW_AND triggerAtomExpression)* + ; + +triggerAtomExpression +@init { gParent.pushMsg("triggerAtomExpression", state); } +@after { gParent.popMsg(state); } + : (identifier comparisionOperator triggerLiteral) + | (LPAREN triggerOrExpression RPAREN) + ; + +triggerLiteral +@init { gParent.pushMsg("triggerLiteral", state); } +@after { gParent.popMsg(state); } + : (Number (KW_HOUR|KW_MINUTE|KW_SECOND)?) + | ByteLengthLiteral + | StringLiteral + ; + +comparisionOperator +@init { gParent.pushMsg("comparisionOperator", state); } +@after { gParent.popMsg(state); } + : EQUAL | LESSTHAN | LESSTHANOREQUALTO | GREATERTHAN | GREATERTHANOREQUALTO + ; + +triggerActionExpression +@init { gParent.pushMsg("triggerActionExpression", state); } +@after { gParent.popMsg(state); } + : KW_KILL + | (KW_MOVE^ KW_TO! 
poolPath) + ; + +createTriggerStatement +@init { gParent.pushMsg("create trigger statement", state); } +@after { gParent.popMsg(state); } + : KW_CREATE KW_TRIGGER rpName=identifier DOT triggerName=identifier + KW_WHEN triggerExpression KW_DO triggerActionExpression + -> ^(TOK_CREATE_TRIGGER $rpName $triggerName triggerExpression triggerActionExpression) + ; + +alterTriggerStatement +@init { gParent.pushMsg("alter trigger statement", state); } +@after { gParent.popMsg(state); } + : KW_ALTER KW_TRIGGER rpName=identifier DOT triggerName=identifier + KW_WHEN triggerExpression KW_DO triggerActionExpression + -> ^(TOK_ALTER_TRIGGER $rpName $triggerName triggerExpression triggerActionExpression) + ; + +dropTriggerStatement +@init { gParent.pushMsg("drop trigger statement", state); } +@after { gParent.popMsg(state); } + : KW_DROP KW_TRIGGER rpName=identifier DOT triggerName=identifier + -> ^(TOK_DROP_TRIGGER $rpName $triggerName) + ; + +createPoolStatement +@init { gParent.pushMsg("create pool statement", state); } +@after { gParent.popMsg(state); } + : KW_CREATE KW_POOL rpName=identifier DOT poolPath + KW_WITH + KW_ALLOC_FRACTION allocFraction=Number + KW_QUERY_PARALLELISM parallelism=Number + KW_SCHEDULING_POLICY policy=StringLiteral + -> ^(TOK_CREATE_POOL $rpName poolPath $allocFraction $parallelism $policy) + ; + +alterPoolSetCmd +@init { gParent.pushMsg("alterPoolSetCmd", state); } +@after { gParent.popMsg(state); } + : ( + (KW_ALLOC_FRACTION EQUAL allocFraction=Number) -> ^(TOK_ALLOC_FRACTION $allocFraction) + | (KW_QUERY_PARALLELISM EQUAL parallelism=Number) -> ^(TOK_QUERY_PARALLELISM $parallelism) + | (KW_SCHEDULING_POLICY EQUAL policy=StringLiteral) -> ^(TOK_SCHEDULING_POLICY $policy) + | (KW_PATH EQUAL path=poolPath) -> ^(TOK_PATH $path) + ) + ; + +alterPoolSetCmdList +@init { gParent.pushMsg("alterPoolSetCmdList", state); } +@after { gParent.popMsg(state); } + : alterPoolSetCmd (COMMA alterPoolSetCmd)* -> alterPoolSetCmd+ + ; + +alterPoolStatement +@init { gParent.pushMsg("alter pool statement", state); } +@after { gParent.popMsg(state); } + : KW_ALTER KW_POOL rpName=identifier DOT poolPath + KW_SET alterPoolSetCmdList + -> ^(TOK_ALTER_POOL $rpName poolPath alterPoolSetCmdList) + ; + +dropPoolStatement +@init { gParent.pushMsg("drop pool statement", state); } +@after { gParent.popMsg(state); } + : KW_DROP KW_POOL rpName=identifier DOT poolPath + -> ^(TOK_DROP_POOL $rpName poolPath) + ; + +createMappingStatement +@init { gParent.pushMsg("create mapping statement", state); } +@after { gParent.popMsg(state); } + : (KW_CREATE mappingType=(KW_USER | KW_GROUP) + KW_MAPPING name=identifier + KW_IN rpName=identifier KW_TO poolPath + (KW_WITH KW_ORDER order=Number)?) + -> ^(TOK_CREATE_MAPPING $rpName $mappingType $name poolPath $order?) + ; + +alterMappingStatement +@init { gParent.pushMsg("alter mapping statement", state); } +@after { gParent.popMsg(state); } + : (KW_ALTER mappingType=(KW_USER | KW_GROUP) + KW_MAPPING name=identifier + KW_IN rpName=identifier KW_TO poolPath + (KW_WITH KW_ORDER order=Number)?) + -> ^(TOK_ALTER_MAPPING $rpName $mappingType $name poolPath $order?)
+ ; + +dropMappingStatement +@init { gParent.pushMsg("drop mapping statement", state); } +@after { gParent.popMsg(state); } + : KW_DROP mappingType=(KW_USER | KW_GROUP) KW_MAPPING + name=identifier KW_IN rpName=identifier + -> ^(TOK_DROP_MAPPING $rpName $mappingType $name) + ; + diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java index e704c73112..dd2b647cfe 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java @@ -141,6 +141,12 @@ commandType.put(HiveParser.TOK_CREATE_TRIGGER, HiveOperation.CREATE_TRIGGER); commandType.put(HiveParser.TOK_ALTER_TRIGGER, HiveOperation.ALTER_TRIGGER); commandType.put(HiveParser.TOK_DROP_TRIGGER, HiveOperation.DROP_TRIGGER); + commandType.put(HiveParser.TOK_CREATE_POOL, HiveOperation.CREATE_POOL); + commandType.put(HiveParser.TOK_ALTER_POOL, HiveOperation.ALTER_POOL); + commandType.put(HiveParser.TOK_DROP_POOL, HiveOperation.DROP_POOL); + commandType.put(HiveParser.TOK_CREATE_MAPPING, HiveOperation.CREATE_MAPPING); + commandType.put(HiveParser.TOK_ALTER_MAPPING, HiveOperation.ALTER_MAPPING); + commandType.put(HiveParser.TOK_DROP_MAPPING, HiveOperation.DROP_MAPPING); } static { @@ -323,6 +329,12 @@ private static BaseSemanticAnalyzer getInternal(QueryState queryState, ASTNode t case HiveParser.TOK_CREATE_TRIGGER: case HiveParser.TOK_ALTER_TRIGGER: case HiveParser.TOK_DROP_TRIGGER: + case HiveParser.TOK_CREATE_POOL: + case HiveParser.TOK_ALTER_POOL: + case HiveParser.TOK_DROP_POOL: + case HiveParser.TOK_CREATE_MAPPING: + case HiveParser.TOK_ALTER_MAPPING: + case HiveParser.TOK_DROP_MAPPING: return new DDLSemanticAnalyzer(queryState); case HiveParser.TOK_CREATEFUNCTION: diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMMappingDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMMappingDesc.java new file mode 100644 index 0000000000..8da2125b66 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMMappingDesc.java @@ -0,0 +1,90 @@ +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +@Explain(displayName = "Create/Alter Mapping", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class CreateOrAlterWMMappingDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = -442968568922083053L; + + private String resourcePlanName; + private String entityType; + private String entityName; + private String poolPath; + private Integer ordering; + private boolean update; + + public CreateOrAlterWMMappingDesc() {} + + public CreateOrAlterWMMappingDesc(String resourcePlanName, String entityType, String entityName, + String poolPath, Integer ordering, boolean update) { + this.resourcePlanName = resourcePlanName; + this.entityType = entityType; + this.entityName = entityName; + this.poolPath = poolPath; + this.ordering = ordering; + this.setUpdate(update); + } + + @Explain(displayName = "resourcePlanName", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + public void setResourcePlanName(String resourcePlanName) { + this.resourcePlanName = resourcePlanName; + } + + @Explain(displayName = "entityType", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getEntityType() 
{ + return entityType; + } + + public void setEntityType(String entityType) { + this.entityType = entityType; + } + + @Explain(displayName = "entityName", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getEntityName() { + return entityName; + } + + public void setEntityName(String entityName) { + this.entityName = entityName; + } + + @Explain(displayName = "poolPath", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPoolPath() { + return poolPath; + } + + public void setPoolPath(String poolPath) { + this.poolPath = poolPath; + } + + @Explain(displayName = "ordering", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Integer getOrdering() { + return ordering; + } + + public void setOrdering(Integer ordering) { + this.ordering = ordering; + } + + @Explain(displayName = "update", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public boolean isUpdate() { + return update; + } + + public void setUpdate(boolean update) { + this.update = update; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMPoolDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMPoolDesc.java new file mode 100644 index 0000000000..a430b708a1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrAlterWMPoolDesc.java @@ -0,0 +1,94 @@ +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +@Explain(displayName = "Create/Alter Pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class CreateOrAlterWMPoolDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 4872940135771213510L; + + private String resourcePlanName; + private String poolPath; + private Double allocFraction; + private Integer queryParallelism; + private String schedulingPolicy; + private boolean update; + private String newPoolPath; + + public CreateOrAlterWMPoolDesc() {} + + public CreateOrAlterWMPoolDesc(String resourcePlanName, String poolPath, Double allocFraction, + Integer queryParallelism, String schedulingPolicy, boolean update, String newPoolPath) { + this.resourcePlanName = resourcePlanName; + this.poolPath = poolPath; + this.allocFraction = allocFraction; + this.queryParallelism = queryParallelism; + this.schedulingPolicy = schedulingPolicy; + this.update = update; + this.newPoolPath = newPoolPath; + } + + @Explain(displayName="resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + public void setResourcePlanName(String resourcePlanName) { + this.resourcePlanName = resourcePlanName; + } + + @Explain(displayName="poolPath", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPoolPath() { + return poolPath; + } + + public void setPoolPath(String poolPath) { + this.poolPath = poolPath; + } + + @Explain(displayName="allocFraction", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Double getAllocFraction() { + return allocFraction; + } + + public void setAllocFraction(Double allocFraction) { + this.allocFraction = allocFraction; + } + + @Explain(displayName="queryParallelism", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Integer getQueryParallelism() { + return queryParallelism; + } + + public void setQueryParallelism(Integer queryParallelism) { + this.queryParallelism = 
queryParallelism; + } + + @Explain(displayName="schedulingPolicy", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getSchedulingPolicy() { + return schedulingPolicy; + } + + public void setSchedulingPolicy(String schedulingPolicy) { + this.schedulingPolicy = schedulingPolicy; + } + + @Explain(displayName="isUpdate", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public boolean isUpdate() { + return update; + } + + public void setUpdate(boolean update) { + this.update = update; + } + + @Explain(displayName="newPoolPath", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getNewPoolPath() { + return newPoolPath; + } + + public void setNewPoolPath(String newPoolPath) { + this.newPoolPath = newPoolPath; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrDropTriggerToPoolMappingDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrDropTriggerToPoolMappingDesc.java new file mode 100644 index 0000000000..e1f912f951 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateOrDropTriggerToPoolMappingDesc.java @@ -0,0 +1,66 @@ +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +@Explain(displayName = "Create/Drop Trigger to pool mappings", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class CreateOrDropTriggerToPoolMappingDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 383046258694558029L; + + private String resourcePlanName; + private String triggerName; + private String poolPath; + private boolean drop; + + public CreateOrDropTriggerToPoolMappingDesc() {} + + public CreateOrDropTriggerToPoolMappingDesc(String resourcePlanName, String triggerName, + String poolPath, boolean drop) { + this.resourcePlanName = resourcePlanName; + this.triggerName = triggerName; + this.poolPath = poolPath; + this.drop = drop; + } + + @Explain(displayName = "resourcePlanName", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + public void setResourcePlanName(String resourcePlanName) { + this.resourcePlanName = resourcePlanName; + } + + @Explain(displayName = "triggerName", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getTriggerName() { + return triggerName; + } + + public void setTriggerName(String triggerName) { + this.triggerName = triggerName; + } + + @Explain(displayName = "poolPath", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getPoolPath() { + return poolPath; + } + + public void setPoolPath(String poolPath) { + this.poolPath = poolPath; + } + + @Explain(displayName = "drop or create", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public boolean shouldDrop() { + return drop; + } + + public void setDrop(boolean drop) { + this.drop = drop; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index 369f8440a2..561800a0e8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -95,6 +95,14 @@ private AlterWMTriggerDesc alterWMTriggerDesc; private DropWMTriggerDesc dropWMTriggerDesc; + private CreateOrAlterWMPoolDesc wmPoolDesc; + private DropWMPoolDesc dropWMPoolDesc; + + private CreateOrAlterWMMappingDesc wmMappingDesc; + private 
DropWMMappingDesc dropWMMappingDesc; + + private CreateOrDropTriggerToPoolMappingDesc triggerToPoolMappingDesc; + boolean needLock = false; /** @@ -571,31 +579,61 @@ public DDLWork(HashSet inputs, HashSet outputs, public DDLWork(HashSet inputs, HashSet outputs, DropResourcePlanDesc dropResourcePlanDesc) { this(inputs, outputs); - this.setDropResourcePlanDesc(dropResourcePlanDesc); + this.dropResourcePlanDesc = dropResourcePlanDesc; } public DDLWork(HashSet inputs, HashSet outputs, AlterResourcePlanDesc alterResourcePlanDesc) { this(inputs, outputs); - this.setAlterResourcePlanDesc(alterResourcePlanDesc); + this.alterResourcePlanDesc = alterResourcePlanDesc; } public DDLWork(HashSet inputs, HashSet outputs, CreateWMTriggerDesc createWMTriggerDesc) { this(inputs, outputs); - this.setCreateWMTriggerDesc(createWMTriggerDesc); + this.createWMTriggerDesc = createWMTriggerDesc; } public DDLWork(HashSet inputs, HashSet outputs, AlterWMTriggerDesc alterWMTriggerDesc) { this(inputs, outputs); - this.setAlterWMTriggerDesc(alterWMTriggerDesc); + this.alterWMTriggerDesc = alterWMTriggerDesc; } public DDLWork(HashSet inputs, HashSet outputs, DropWMTriggerDesc dropWMTriggerDesc) { this(inputs, outputs); - this.setDropWMTriggerDesc(dropWMTriggerDesc); + this.dropWMTriggerDesc = dropWMTriggerDesc; + } + + public DDLWork(HashSet inputs, HashSet outputs, + CreateOrAlterWMPoolDesc wmPoolDesc) { + this(inputs, outputs); + this.wmPoolDesc = wmPoolDesc; + } + + public DDLWork(HashSet inputs, HashSet outputs, + DropWMPoolDesc dropWMPoolDesc) { + this(inputs, outputs); + this.dropWMPoolDesc = dropWMPoolDesc; + } + + public DDLWork(HashSet inputs, HashSet outputs, + CreateOrAlterWMMappingDesc wmMappingDesc) { + this(inputs, outputs); + this.wmMappingDesc = wmMappingDesc; + } + + public DDLWork(HashSet inputs, HashSet outputs, + DropWMMappingDesc dropWMMappingDesc) { + this(inputs, outputs); + this.dropWMMappingDesc = dropWMMappingDesc; + } + + public DDLWork(HashSet inputs, HashSet outputs, + CreateOrDropTriggerToPoolMappingDesc triggerToPoolMappingDesc) { + this(inputs, outputs); + this.triggerToPoolMappingDesc = triggerToPoolMappingDesc; } /** @@ -1344,4 +1382,44 @@ public DropWMTriggerDesc getDropWMTriggerDesc() { public void setDropWMTriggerDesc(DropWMTriggerDesc dropWMTriggerDesc) { this.dropWMTriggerDesc = dropWMTriggerDesc; } + + public CreateOrAlterWMPoolDesc getWmPoolDesc() { + return wmPoolDesc; + } + + public void setWmPoolDesc(CreateOrAlterWMPoolDesc wmPoolDesc) { + this.wmPoolDesc = wmPoolDesc; + } + + public DropWMPoolDesc getDropWMPoolDesc() { + return dropWMPoolDesc; + } + + public void setDropWMPoolDesc(DropWMPoolDesc dropWMPoolDesc) { + this.dropWMPoolDesc = dropWMPoolDesc; + } + + public CreateOrAlterWMMappingDesc getWmMappingDesc() { + return wmMappingDesc; + } + + public void setWmMappingDesc(CreateOrAlterWMMappingDesc wmMappingDesc) { + this.wmMappingDesc = wmMappingDesc; + } + + public DropWMMappingDesc getDropWMMappingDesc() { + return dropWMMappingDesc; + } + + public void setDropWMMappingDesc(DropWMMappingDesc dropWMMappingDesc) { + this.dropWMMappingDesc = dropWMMappingDesc; + } + + public CreateOrDropTriggerToPoolMappingDesc getTriggerToPoolMappingDesc() { + return triggerToPoolMappingDesc; + } + + public void setTriggerToPoolMappingDesc(CreateOrDropTriggerToPoolMappingDesc triggerToPoolMappingDesc) { + this.triggerToPoolMappingDesc = triggerToPoolMappingDesc; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMMappingDesc.java 
ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMMappingDesc.java new file mode 100644 index 0000000000..889713b0d4 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMMappingDesc.java @@ -0,0 +1,54 @@ +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +@Explain(displayName = "Drop mapping", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class DropWMMappingDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = -1567558687529244218L; + + private String resourcePlanName; + private String entityType; + private String entityName; + + public DropWMMappingDesc() {} + + public DropWMMappingDesc(String resourcePlanName, String entityType, String entityName) { + super(); + this.resourcePlanName = resourcePlanName; + this.entityType = entityType; + this.entityName = entityName; + } + + @Explain(displayName = "resourcePlanName", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + public void setResourcePlanName(String resourcePlanName) { + this.resourcePlanName = resourcePlanName; + } + + @Explain(displayName = "entityType", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getEntityType() { + return entityType; + } + + public void setEntityType(String entityType) { + this.entityType = entityType; + } + + @Explain(displayName = "entityName", + explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getEntityName() { + return entityName; + } + + public void setEntityName(String entityName) { + this.entityName = entityName; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMPoolDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMPoolDesc.java new file mode 100644 index 0000000000..ff1bedd0c4 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DropWMPoolDesc.java @@ -0,0 +1,33 @@ +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +public class DropWMPoolDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = -2608462103392563252L; + + private String resourcePlanName; + private String poolPath; + + public DropWMPoolDesc() {} + + public DropWMPoolDesc(String resourcePlanName, String poolPath) { + this.resourcePlanName = resourcePlanName; + this.poolPath = poolPath; + } + + public String getResourcePlanName() { + return resourcePlanName; + } + + public void setResourcePlanName(String resourcePlanName) { + this.resourcePlanName = resourcePlanName; + } + + public String getPoolPath() { + return poolPath; + } + + public void setPoolPath(String poolPath) { + this.poolPath = poolPath; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java index 1ce1c76f37..226214f34a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java @@ -146,7 +146,14 @@ DROP_RESOURCEPLAN("DROP RESOURCEPLAN", null, null, false, false), CREATE_TRIGGER("CREATE TRIGGER", null, null, false, false), ALTER_TRIGGER("ALTER TRIGGER", null, null, false, false), - DROP_TRIGGER("DROP TRIGGER", null, null, false, false); + DROP_TRIGGER("DROP TRIGGER", null, null, false, false), + CREATE_POOL("CREATE POOL", null, null, false, false), + ALTER_POOL("ALTER POOL", null, null, false, false), + 
DROP_POOL("DROP POOL", null, null, false, false), + CREATE_MAPPING("CREATE MAPPING", null, null, false, false), + ALTER_MAPPING("ALTER MAPPING", null, null, false, false), + DROP_MAPPING("DROP MAPPING", null, null, false, false); + private String operationName; diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java index ba1d01f3c6..6ac6d6ea42 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java @@ -139,6 +139,12 @@ CREATE_TRIGGER, ALTER_TRIGGER, DROP_TRIGGER, + CREATE_POOL, + ALTER_POOL, + DROP_POOL, + CREATE_MAPPING, + ALTER_MAPPING, + DROP_MAPPING, // ==== Hive command operation types starts here ==== // SET, diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java index dc20d16df5..5b09ae8514 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java @@ -317,6 +317,12 @@ public HivePrivilegeObjectType getObjectType() { adminPrivOps.add(HiveOperationType.CREATE_TRIGGER); adminPrivOps.add(HiveOperationType.ALTER_TRIGGER); adminPrivOps.add(HiveOperationType.DROP_TRIGGER); + adminPrivOps.add(HiveOperationType.CREATE_POOL); + adminPrivOps.add(HiveOperationType.ALTER_POOL); + adminPrivOps.add(HiveOperationType.DROP_POOL); + adminPrivOps.add(HiveOperationType.CREATE_MAPPING); + adminPrivOps.add(HiveOperationType.ALTER_MAPPING); + adminPrivOps.add(HiveOperationType.DROP_MAPPING); // operations require select priv op2Priv.put(HiveOperationType.SHOWCOLUMNS, PrivRequirement.newIOPrivRequirement @@ -482,6 +488,12 @@ public HivePrivilegeObjectType getObjectType() { op2Priv.put(HiveOperationType.CREATE_TRIGGER, PrivRequirement.newIOPrivRequirement(null, null)); op2Priv.put(HiveOperationType.ALTER_TRIGGER, PrivRequirement.newIOPrivRequirement(null, null)); op2Priv.put(HiveOperationType.DROP_TRIGGER, PrivRequirement.newIOPrivRequirement(null, null)); + op2Priv.put(HiveOperationType.CREATE_POOL, PrivRequirement.newIOPrivRequirement(null, null)); + op2Priv.put(HiveOperationType.ALTER_POOL, PrivRequirement.newIOPrivRequirement(null, null)); + op2Priv.put(HiveOperationType.DROP_POOL, PrivRequirement.newIOPrivRequirement(null, null)); + op2Priv.put(HiveOperationType.CREATE_MAPPING, PrivRequirement.newIOPrivRequirement(null, null)); + op2Priv.put(HiveOperationType.ALTER_MAPPING, PrivRequirement.newIOPrivRequirement(null, null)); + op2Priv.put(HiveOperationType.DROP_MAPPING, PrivRequirement.newIOPrivRequirement(null, null)); } /** diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java index 5ba6639e0c..905ce20da0 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java @@ -136,7 +136,7 @@ public static WMMapping mapping(String user, String pool) { public static WMMapping mapping(String type, String user, String pool, int ordering) { WMMapping mapping = new WMMapping("rp", type, user); - mapping.setPoolName(pool); + 
mapping.setPoolPath(pool); mapping.setOrdering(ordering); return mapping; } @@ -161,6 +161,10 @@ private static WMFullResourcePlan createDummyPlan(int numSessions) { WMFullResourcePlan plan = new WMFullResourcePlan(new WMResourcePlan("rp"), Lists.newArrayList(pool("llap", numSessions, 1.0f))); plan.getPlan().setDefaultPoolPath("llap"); + WMMapping mapping = new WMMapping("rp", "DEFAULT", ""); + mapping.setPoolPath("llap"); + plan.addToPools(pool("llap", numSessions, 1.0f)); + plan.addToMappings(mapping); return plan; } diff --git ql/src/test/queries/clientpositive/resourceplan.q ql/src/test/queries/clientpositive/resourceplan.q index a094712dd8..0d1b93ef31 100644 --- ql/src/test/queries/clientpositive/resourceplan.q +++ ql/src/test/queries/clientpositive/resourceplan.q @@ -150,3 +150,65 @@ ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ = 1000K DO KILL; ALTER RESOURCE PLAN plan_2 DISABLE; CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ = 0 DO MOVE TO null_pool; SELECT * FROM SYS.WM_TRIGGERS; + + +-- +-- Create pool command. +-- + +-- Cannot create pool in active plans. +CREATE POOL plan_1.default WITH + ALLOC_FRACTION 1.0 QUERY_PARALLELISM 5 SCHEDULING_POLICY 'default'; + +CREATE POOL plan_2.default WITH + ALLOC_FRACTION 1.0 QUERY_PARALLELISM 5 SCHEDULING_POLICY 'default'; +SELECT * FROM SYS.WM_POOLS; + +CREATE POOL plan_2.default.c1 WITH + ALLOC_FRACTION 0.3 QUERY_PARALLELISM 3 SCHEDULING_POLICY 'priority'; + +CREATE POOL plan_2.default.c2 WITH + ALLOC_FRACTION 0.7 QUERY_PARALLELISM 2 SCHEDULING_POLICY 'fair'; + +ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.2; + +ALTER POOL plan_2.default SET path = def; +SELECT * FROM SYS.WM_POOLS; + +DROP POOL plan_2.default; +SELECT * FROM SYS.WM_POOLS; + + +-- +-- Pool to trigger mappings. +-- + +-- Success. +ALTER RESOURCE PLAN plan_2 ADD TRIGGER trigger_1 TO def.c1; +ALTER RESOURCE PLAN plan_2 ADD TRIGGER trigger_1 TO def.c2; + +-- Failures. +ALTER RESOURCE PLAN plan_2 ADD TRIGGER trigger_1 TO default; +ALTER RESOURCE PLAN plan_2 ADD TRIGGER trigger_2 TO def; + +SELECT * FROM SYS.WM_TRIGGERS_TO_POOLS; + +-- Drop +ALTER RESOURCE PLAN plan_2 DROP TRIGGER trigger_1 FROM def.c1; +ALTER RESOURCE PLAN plan_2 DROP TRIGGER trigger_2 FROM def.c1; +SELECT * FROM SYS.WM_TRIGGERS_TO_POOLS; + + +-- +-- User and group mappings. 
+-- + +CREATE USER MAPPING user1 IN plan_2 TO def; +CREATE USER MAPPING user2 IN plan_2 TO def WITH ORDER 1; +CREATE GROUP MAPPING group1 IN plan_2 TO def.c1; +CREATE GROUP MAPPING group2 IN plan_2 TO def.c2 WITH ORDER 1; +SELECT * FROM SYS.WM_MAPPINGS; + +DROP USER MAPPING user2 in plan_2; +DROP GROUP MAPPING group2 in plan_2; +SELECT * FROM SYS.WM_MAPPINGS; diff --git ql/src/test/results/clientpositive/llap/resourceplan.q.out ql/src/test/results/clientpositive/llap/resourceplan.q.out index fe4d77a1d2..f25e806d8e 100644 --- ql/src/test/results/clientpositive/llap/resourceplan.q.out +++ ql/src/test/results/clientpositive/llap/resourceplan.q.out @@ -2211,6 +2211,154 @@ ON POSTHOOK: type: CREATETABLE POSTHOOK: Output: SYS@WM_TRIGGERS POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `WM_POOLS` ( + `RP_NAME` string, + `PATH` string, + `ALLOC_FRACTION` double, + `QUERY_PARALLELISM` int, + `SCHEDULING_POLICY` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + WM_RESOURCEPLAN.NAME, + WM_POOL.PATH, + WM_POOL.ALLOC_FRACTION, + WM_POOL.QUERY_PARALLELISM, + WM_POOL.SCHEDULING_POLICY +FROM + WM_POOL +JOIN + WM_RESOURCEPLAN +ON + WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@WM_POOLS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `WM_POOLS` ( + `RP_NAME` string, + `PATH` string, + `ALLOC_FRACTION` double, + `QUERY_PARALLELISM` int, + `SCHEDULING_POLICY` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + WM_RESOURCEPLAN.NAME, + WM_POOL.PATH, + WM_POOL.ALLOC_FRACTION, + WM_POOL.QUERY_PARALLELISM, + WM_POOL.SCHEDULING_POLICY +FROM + WM_POOL +JOIN + WM_RESOURCEPLAN +ON + WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@WM_POOLS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `WM_TRIGGERS_TO_POOLS` ( + `RP_NAME` string, + `TRIGGER_NAME` string, + `POOL_PATH` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + WM_RESOURCEPLAN.NAME RP_NAME, + WM_TRIGGER.NAME TRIGGER_NAME, + WM_POOL.PATH POOL_PATH +FROM + WM_POOL_TO_TRIGGER +JOIN WM_POOL ON WM_POOL_TO_TRIGGER.POOL_ID = WM_POOL.POOL_ID +JOIN WM_TRIGGER ON WM_POOL_TO_TRIGGER.TRIGGER_ID = WM_TRIGGER.TRIGGER_ID +JOIN WM_RESOURCEPLAN ON WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@WM_TRIGGERS_TO_POOLS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `WM_TRIGGERS_TO_POOLS` ( + `RP_NAME` string, + `TRIGGER_NAME` string, + `POOL_PATH` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + WM_RESOURCEPLAN.NAME RP_NAME, + WM_TRIGGER.NAME TRIGGER_NAME, + WM_POOL.PATH POOL_PATH +FROM + WM_POOL_TO_TRIGGER +JOIN WM_POOL ON WM_POOL_TO_TRIGGER.POOL_ID = WM_POOL.POOL_ID +JOIN WM_TRIGGER ON WM_POOL_TO_TRIGGER.TRIGGER_ID = WM_TRIGGER.TRIGGER_ID +JOIN WM_RESOURCEPLAN ON WM_POOL.RP_ID = WM_RESOURCEPLAN.RP_ID" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@WM_TRIGGERS_TO_POOLS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `WM_MAPPINGS` ( + `RP_NAME` string, + `ENTITY_TYPE` 
string, + `ENTITY_NAME` string, + `POOL_PATH` string, + `ORDERING` int +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + WM_RESOURCEPLAN.NAME, + ENTITY_TYPE, + ENTITY_NAME, + WM_POOL.PATH, + ORDERING +FROM + WM_MAPPING +JOIN WM_RESOURCEPLAN ON WM_MAPPING.RP_ID = WM_RESOURCEPLAN.RP_ID +JOIN WM_POOL ON WM_POOL.POOL_ID = WM_MAPPING.POOL_ID" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@WM_MAPPINGS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `WM_MAPPINGS` ( + `RP_NAME` string, + `ENTITY_TYPE` string, + `ENTITY_NAME` string, + `POOL_PATH` string, + `ORDERING` int +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + WM_RESOURCEPLAN.NAME, + ENTITY_TYPE, + ENTITY_NAME, + WM_POOL.PATH, + ORDERING +FROM + WM_MAPPING +JOIN WM_RESOURCEPLAN ON WM_MAPPING.RP_ID = WM_RESOURCEPLAN.RP_ID +JOIN WM_POOL ON WM_POOL.POOL_ID = WM_MAPPING.POOL_ID" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@WM_MAPPINGS +POSTHOOK: Output: database:sys PREHOOK: query: DROP DATABASE IF EXISTS INFORMATION_SCHEMA PREHOOK: type: DROPDATABASE POSTHOOK: query: DROP DATABASE IF EXISTS INFORMATION_SCHEMA @@ -3368,3 +3516,154 @@ POSTHOOK: Input: sys@wm_triggers #### A masked pattern was here #### plan_2 trigger_1 BYTES_READ = 0 MOVE TO null_pool plan_1 trigger_2 BYTES_READ > 100 MOVE TO slow_pool +PREHOOK: query: CREATE POOL plan_1.default WITH + ALLOC_FRACTION 1.0 QUERY_PARALLELISM 5 SCHEDULING_POLICY 'default' +PREHOOK: type: CREATE POOL +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.) +PREHOOK: query: CREATE POOL plan_2.default WITH + ALLOC_FRACTION 1.0 QUERY_PARALLELISM 5 SCHEDULING_POLICY 'default' +PREHOOK: type: CREATE POOL +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. 
AlreadyExistsException(message:Pool with path: default already exists in resource plan: plan_2) +PREHOOK: query: SELECT * FROM SYS.WM_POOLS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_pools +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_POOLS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_pools +#### A masked pattern was here #### +plan_2 default 1.0 4 NULL +plan_1 default 1.0 4 NULL +PREHOOK: query: CREATE POOL plan_2.default.c1 WITH + ALLOC_FRACTION 0.3 QUERY_PARALLELISM 3 SCHEDULING_POLICY 'priority' +PREHOOK: type: CREATE POOL +POSTHOOK: query: CREATE POOL plan_2.default.c1 WITH + ALLOC_FRACTION 0.3 QUERY_PARALLELISM 3 SCHEDULING_POLICY 'priority' +POSTHOOK: type: CREATE POOL +PREHOOK: query: CREATE POOL plan_2.default.c2 WITH + ALLOC_FRACTION 0.7 QUERY_PARALLELISM 2 SCHEDULING_POLICY 'fair' +PREHOOK: type: CREATE POOL +POSTHOOK: query: CREATE POOL plan_2.default.c2 WITH + ALLOC_FRACTION 0.7 QUERY_PARALLELISM 2 SCHEDULING_POLICY 'fair' +POSTHOOK: type: CREATE POOL +PREHOOK: query: ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.2 +PREHOOK: type: ALTER POOL +POSTHOOK: query: ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.2 +POSTHOOK: type: ALTER POOL +PREHOOK: query: ALTER POOL plan_2.default SET path = def +PREHOOK: type: ALTER POOL +POSTHOOK: query: ALTER POOL plan_2.default SET path = def +POSTHOOK: type: ALTER POOL +PREHOOK: query: SELECT * FROM SYS.WM_POOLS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_pools +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_POOLS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_pools +#### A masked pattern was here #### +plan_2 def 1.0 4 NULL +plan_2 def.c1 0.3 3 'priority' +plan_2 def.c2 0.2 2 'fair' +plan_1 default 1.0 4 NULL +PREHOOK: query: DROP POOL plan_2.default +PREHOOK: type: DROP POOL +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Cannot delete pool: default) +PREHOOK: query: SELECT * FROM SYS.WM_POOLS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_pools +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_POOLS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_pools +#### A masked pattern was here #### +plan_2 def 1.0 4 NULL +plan_2 def.c1 0.3 3 'priority' +plan_2 def.c2 0.2 2 'fair' +plan_1 default 1.0 4 NULL +PREHOOK: query: ALTER RESOURCE PLAN plan_2 ADD TRIGGER trigger_1 TO def.c1 +PREHOOK: type: ALTER RESOURCEPLAN +POSTHOOK: query: ALTER RESOURCE PLAN plan_2 ADD TRIGGER trigger_1 TO def.c1 +POSTHOOK: type: ALTER RESOURCEPLAN +PREHOOK: query: ALTER RESOURCE PLAN plan_2 ADD TRIGGER trigger_1 TO def.c2 +PREHOOK: type: ALTER RESOURCEPLAN +POSTHOOK: query: ALTER RESOURCE PLAN plan_2 ADD TRIGGER trigger_1 TO def.c2 +POSTHOOK: type: ALTER RESOURCEPLAN +PREHOOK: query: ALTER RESOURCE PLAN plan_2 ADD TRIGGER trigger_1 TO default +PREHOOK: type: ALTER RESOURCEPLAN +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Cannot find pool with path: default) +PREHOOK: query: ALTER RESOURCE PLAN plan_2 ADD TRIGGER trigger_2 TO def +PREHOOK: type: ALTER RESOURCEPLAN +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. 
NoSuchObjectException(message:Cannot find trigger with name: trigger_2) +PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS_TO_POOLS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_triggers_to_pools +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS_TO_POOLS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_triggers_to_pools +#### A masked pattern was here #### +plan_2 trigger_1 def.c1 +plan_2 trigger_1 def.c2 +PREHOOK: query: ALTER RESOURCE PLAN plan_2 DROP TRIGGER trigger_1 FROM def.c1 +PREHOOK: type: ALTER RESOURCEPLAN +POSTHOOK: query: ALTER RESOURCE PLAN plan_2 DROP TRIGGER trigger_1 FROM def.c1 +POSTHOOK: type: ALTER RESOURCEPLAN +PREHOOK: query: ALTER RESOURCE PLAN plan_2 DROP TRIGGER trigger_2 FROM def.c1 +PREHOOK: type: ALTER RESOURCEPLAN +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Cannot find trigger with name: trigger_2) +PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS_TO_POOLS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_triggers_to_pools +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS_TO_POOLS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_triggers_to_pools +#### A masked pattern was here #### +plan_2 trigger_1 def.c2 +PREHOOK: query: CREATE USER MAPPING user1 IN plan_2 TO def +PREHOOK: type: CREATE MAPPING +POSTHOOK: query: CREATE USER MAPPING user1 IN plan_2 TO def +POSTHOOK: type: CREATE MAPPING +PREHOOK: query: CREATE USER MAPPING user2 IN plan_2 TO def WITH ORDER 1 +PREHOOK: type: CREATE MAPPING +POSTHOOK: query: CREATE USER MAPPING user2 IN plan_2 TO def WITH ORDER 1 +POSTHOOK: type: CREATE MAPPING +PREHOOK: query: CREATE GROUP MAPPING group1 IN plan_2 TO def.c1 +PREHOOK: type: CREATE MAPPING +POSTHOOK: query: CREATE GROUP MAPPING group1 IN plan_2 TO def.c1 +POSTHOOK: type: CREATE MAPPING +PREHOOK: query: CREATE GROUP MAPPING group2 IN plan_2 TO def.c2 WITH ORDER 1 +PREHOOK: type: CREATE MAPPING +POSTHOOK: query: CREATE GROUP MAPPING group2 IN plan_2 TO def.c2 WITH ORDER 1 +POSTHOOK: type: CREATE MAPPING +PREHOOK: query: SELECT * FROM SYS.WM_MAPPINGS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_mappings +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_MAPPINGS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_mappings +#### A masked pattern was here #### +plan_2 USER user1 def 0 +plan_2 USER user2 def 1 +plan_2 GROUP group1 def.c1 0 +plan_2 GROUP group2 def.c2 1 +PREHOOK: query: DROP USER MAPPING user2 in plan_2 +PREHOOK: type: DROP MAPPING +POSTHOOK: query: DROP USER MAPPING user2 in plan_2 +POSTHOOK: type: DROP MAPPING +PREHOOK: query: DROP GROUP MAPPING group2 in plan_2 +PREHOOK: type: DROP MAPPING +POSTHOOK: query: DROP GROUP MAPPING group2 in plan_2 +POSTHOOK: type: DROP MAPPING +PREHOOK: query: SELECT * FROM SYS.WM_MAPPINGS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_mappings +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_MAPPINGS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_mappings +#### A masked pattern was here #### +plan_2 USER user1 def 0 +plan_2 GROUP group1 def.c1 0 diff --git service/src/java/org/apache/hive/service/server/HiveServer2.java service/src/java/org/apache/hive/service/server/HiveServer2.java index 602dfdae7c..cc3e951754 100644 --- service/src/java/org/apache/hive/service/server/HiveServer2.java +++ service/src/java/org/apache/hive/service/server/HiveServer2.java @@ -303,6 +303,9 @@ private WMFullResourcePlan createTestResourcePlan() { resourcePlan = new 
WMFullResourcePlan( new WMResourcePlan("testDefault"), Lists.newArrayList(pool)); resourcePlan.getPlan().setDefaultPoolPath("testDefault"); + WMMapping mapping = new WMMapping("testDefault", "DEFAULT", ""); + mapping.setPoolPath("llap"); + resourcePlan.addToMappings(mapping); return resourcePlan; } diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index 58c5af5c7b..45a09ca2ba 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size988; - ::apache::thrift::protocol::TType _etype991; - xfer += iprot->readListBegin(_etype991, _size988); - this->success.resize(_size988); - uint32_t _i992; - for (_i992 = 0; _i992 < _size988; ++_i992) + uint32_t _size1008; + ::apache::thrift::protocol::TType _etype1011; + xfer += iprot->readListBegin(_etype1011, _size1008); + this->success.resize(_size1008); + uint32_t _i1012; + for (_i1012 = 0; _i1012 < _size1008; ++_i1012) { - xfer += iprot->readString(this->success[_i992]); + xfer += iprot->readString(this->success[_i1012]); } xfer += iprot->readListEnd(); } @@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter993; - for (_iter993 = this->success.begin(); _iter993 != this->success.end(); ++_iter993) + std::vector ::const_iterator _iter1013; + for (_iter1013 = this->success.begin(); _iter1013 != this->success.end(); ++_iter1013) { - xfer += oprot->writeString((*_iter993)); + xfer += oprot->writeString((*_iter1013)); } xfer += oprot->writeListEnd(); } @@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size994; - ::apache::thrift::protocol::TType _etype997; - xfer += iprot->readListBegin(_etype997, _size994); - (*(this->success)).resize(_size994); - uint32_t _i998; - for (_i998 = 0; _i998 < _size994; ++_i998) + uint32_t _size1014; + ::apache::thrift::protocol::TType _etype1017; + xfer += iprot->readListBegin(_etype1017, _size1014); + (*(this->success)).resize(_size1014); + uint32_t _i1018; + for (_i1018 = 0; _i1018 < _size1014; ++_i1018) { - xfer += iprot->readString((*(this->success))[_i998]); + xfer += iprot->readString((*(this->success))[_i1018]); } xfer += iprot->readListEnd(); } @@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size999; - ::apache::thrift::protocol::TType _etype1002; - xfer += iprot->readListBegin(_etype1002, _size999); - this->success.resize(_size999); - uint32_t _i1003; - for (_i1003 = 0; _i1003 < _size999; ++_i1003) + uint32_t _size1019; + ::apache::thrift::protocol::TType _etype1022; + xfer += iprot->readListBegin(_etype1022, _size1019); + this->success.resize(_size1019); + uint32_t _i1023; + for (_i1023 = 0; _i1023 < _size1019; ++_i1023) { - xfer += iprot->readString(this->success[_i1003]); + xfer 
+= iprot->readString(this->success[_i1023]); } xfer += iprot->readListEnd(); } @@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1004; - for (_iter1004 = this->success.begin(); _iter1004 != this->success.end(); ++_iter1004) + std::vector ::const_iterator _iter1024; + for (_iter1024 = this->success.begin(); _iter1024 != this->success.end(); ++_iter1024) { - xfer += oprot->writeString((*_iter1004)); + xfer += oprot->writeString((*_iter1024)); } xfer += oprot->writeListEnd(); } @@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1005; - ::apache::thrift::protocol::TType _etype1008; - xfer += iprot->readListBegin(_etype1008, _size1005); - (*(this->success)).resize(_size1005); - uint32_t _i1009; - for (_i1009 = 0; _i1009 < _size1005; ++_i1009) + uint32_t _size1025; + ::apache::thrift::protocol::TType _etype1028; + xfer += iprot->readListBegin(_etype1028, _size1025); + (*(this->success)).resize(_size1025); + uint32_t _i1029; + for (_i1029 = 0; _i1029 < _size1025; ++_i1029) { - xfer += iprot->readString((*(this->success))[_i1009]); + xfer += iprot->readString((*(this->success))[_i1029]); } xfer += iprot->readListEnd(); } @@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1010; - ::apache::thrift::protocol::TType _ktype1011; - ::apache::thrift::protocol::TType _vtype1012; - xfer += iprot->readMapBegin(_ktype1011, _vtype1012, _size1010); - uint32_t _i1014; - for (_i1014 = 0; _i1014 < _size1010; ++_i1014) + uint32_t _size1030; + ::apache::thrift::protocol::TType _ktype1031; + ::apache::thrift::protocol::TType _vtype1032; + xfer += iprot->readMapBegin(_ktype1031, _vtype1032, _size1030); + uint32_t _i1034; + for (_i1034 = 0; _i1034 < _size1030; ++_i1034) { - std::string _key1015; - xfer += iprot->readString(_key1015); - Type& _val1016 = this->success[_key1015]; - xfer += _val1016.read(iprot); + std::string _key1035; + xfer += iprot->readString(_key1035); + Type& _val1036 = this->success[_key1035]; + xfer += _val1036.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter1017; - for (_iter1017 = this->success.begin(); _iter1017 != this->success.end(); ++_iter1017) + std::map ::const_iterator _iter1037; + for (_iter1037 = this->success.begin(); _iter1037 != this->success.end(); ++_iter1037) { - xfer += oprot->writeString(_iter1017->first); - xfer += _iter1017->second.write(oprot); + xfer += oprot->writeString(_iter1037->first); + xfer += _iter1037->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t 
_size1018; - ::apache::thrift::protocol::TType _ktype1019; - ::apache::thrift::protocol::TType _vtype1020; - xfer += iprot->readMapBegin(_ktype1019, _vtype1020, _size1018); - uint32_t _i1022; - for (_i1022 = 0; _i1022 < _size1018; ++_i1022) + uint32_t _size1038; + ::apache::thrift::protocol::TType _ktype1039; + ::apache::thrift::protocol::TType _vtype1040; + xfer += iprot->readMapBegin(_ktype1039, _vtype1040, _size1038); + uint32_t _i1042; + for (_i1042 = 0; _i1042 < _size1038; ++_i1042) { - std::string _key1023; - xfer += iprot->readString(_key1023); - Type& _val1024 = (*(this->success))[_key1023]; - xfer += _val1024.read(iprot); + std::string _key1043; + xfer += iprot->readString(_key1043); + Type& _val1044 = (*(this->success))[_key1043]; + xfer += _val1044.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1025; - ::apache::thrift::protocol::TType _etype1028; - xfer += iprot->readListBegin(_etype1028, _size1025); - this->success.resize(_size1025); - uint32_t _i1029; - for (_i1029 = 0; _i1029 < _size1025; ++_i1029) + uint32_t _size1045; + ::apache::thrift::protocol::TType _etype1048; + xfer += iprot->readListBegin(_etype1048, _size1045); + this->success.resize(_size1045); + uint32_t _i1049; + for (_i1049 = 0; _i1049 < _size1045; ++_i1049) { - xfer += this->success[_i1029].read(iprot); + xfer += this->success[_i1049].read(iprot); } xfer += iprot->readListEnd(); } @@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1030; - for (_iter1030 = this->success.begin(); _iter1030 != this->success.end(); ++_iter1030) + std::vector ::const_iterator _iter1050; + for (_iter1050 = this->success.begin(); _iter1050 != this->success.end(); ++_iter1050) { - xfer += (*_iter1030).write(oprot); + xfer += (*_iter1050).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1031; - ::apache::thrift::protocol::TType _etype1034; - xfer += iprot->readListBegin(_etype1034, _size1031); - (*(this->success)).resize(_size1031); - uint32_t _i1035; - for (_i1035 = 0; _i1035 < _size1031; ++_i1035) + uint32_t _size1051; + ::apache::thrift::protocol::TType _etype1054; + xfer += iprot->readListBegin(_etype1054, _size1051); + (*(this->success)).resize(_size1051); + uint32_t _i1055; + for (_i1055 = 0; _i1055 < _size1051; ++_i1055) { - xfer += (*(this->success))[_i1035].read(iprot); + xfer += (*(this->success))[_i1055].read(iprot); } xfer += iprot->readListEnd(); } @@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1036; - ::apache::thrift::protocol::TType _etype1039; - xfer += iprot->readListBegin(_etype1039, _size1036); - this->success.resize(_size1036); - uint32_t _i1040; - for (_i1040 = 0; _i1040 < _size1036; ++_i1040) + uint32_t _size1056; + ::apache::thrift::protocol::TType _etype1059; + xfer += iprot->readListBegin(_etype1059, 
_size1056); + this->success.resize(_size1056); + uint32_t _i1060; + for (_i1060 = 0; _i1060 < _size1056; ++_i1060) { - xfer += this->success[_i1040].read(iprot); + xfer += this->success[_i1060].read(iprot); } xfer += iprot->readListEnd(); } @@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1041; - for (_iter1041 = this->success.begin(); _iter1041 != this->success.end(); ++_iter1041) + std::vector ::const_iterator _iter1061; + for (_iter1061 = this->success.begin(); _iter1061 != this->success.end(); ++_iter1061) { - xfer += (*_iter1041).write(oprot); + xfer += (*_iter1061).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1042; - ::apache::thrift::protocol::TType _etype1045; - xfer += iprot->readListBegin(_etype1045, _size1042); - (*(this->success)).resize(_size1042); - uint32_t _i1046; - for (_i1046 = 0; _i1046 < _size1042; ++_i1046) + uint32_t _size1062; + ::apache::thrift::protocol::TType _etype1065; + xfer += iprot->readListBegin(_etype1065, _size1062); + (*(this->success)).resize(_size1062); + uint32_t _i1066; + for (_i1066 = 0; _i1066 < _size1062; ++_i1066) { - xfer += (*(this->success))[_i1046].read(iprot); + xfer += (*(this->success))[_i1066].read(iprot); } xfer += iprot->readListEnd(); } @@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1047; - ::apache::thrift::protocol::TType _etype1050; - xfer += iprot->readListBegin(_etype1050, _size1047); - this->success.resize(_size1047); - uint32_t _i1051; - for (_i1051 = 0; _i1051 < _size1047; ++_i1051) + uint32_t _size1067; + ::apache::thrift::protocol::TType _etype1070; + xfer += iprot->readListBegin(_etype1070, _size1067); + this->success.resize(_size1067); + uint32_t _i1071; + for (_i1071 = 0; _i1071 < _size1067; ++_i1071) { - xfer += this->success[_i1051].read(iprot); + xfer += this->success[_i1071].read(iprot); } xfer += iprot->readListEnd(); } @@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1052; - for (_iter1052 = this->success.begin(); _iter1052 != this->success.end(); ++_iter1052) + std::vector ::const_iterator _iter1072; + for (_iter1072 = this->success.begin(); _iter1072 != this->success.end(); ++_iter1072) { - xfer += (*_iter1052).write(oprot); + xfer += (*_iter1072).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1053; - ::apache::thrift::protocol::TType _etype1056; - xfer += iprot->readListBegin(_etype1056, _size1053); - (*(this->success)).resize(_size1053); - uint32_t _i1057; - for (_i1057 = 0; _i1057 < _size1053; ++_i1057) + uint32_t 
_size1073; + ::apache::thrift::protocol::TType _etype1076; + xfer += iprot->readListBegin(_etype1076, _size1073); + (*(this->success)).resize(_size1073); + uint32_t _i1077; + for (_i1077 = 0; _i1077 < _size1073; ++_i1077) { - xfer += (*(this->success))[_i1057].read(iprot); + xfer += (*(this->success))[_i1077].read(iprot); } xfer += iprot->readListEnd(); } @@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1058; - ::apache::thrift::protocol::TType _etype1061; - xfer += iprot->readListBegin(_etype1061, _size1058); - this->success.resize(_size1058); - uint32_t _i1062; - for (_i1062 = 0; _i1062 < _size1058; ++_i1062) + uint32_t _size1078; + ::apache::thrift::protocol::TType _etype1081; + xfer += iprot->readListBegin(_etype1081, _size1078); + this->success.resize(_size1078); + uint32_t _i1082; + for (_i1082 = 0; _i1082 < _size1078; ++_i1082) { - xfer += this->success[_i1062].read(iprot); + xfer += this->success[_i1082].read(iprot); } xfer += iprot->readListEnd(); } @@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1063; - for (_iter1063 = this->success.begin(); _iter1063 != this->success.end(); ++_iter1063) + std::vector ::const_iterator _iter1083; + for (_iter1083 = this->success.begin(); _iter1083 != this->success.end(); ++_iter1083) { - xfer += (*_iter1063).write(oprot); + xfer += (*_iter1083).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1064; - ::apache::thrift::protocol::TType _etype1067; - xfer += iprot->readListBegin(_etype1067, _size1064); - (*(this->success)).resize(_size1064); - uint32_t _i1068; - for (_i1068 = 0; _i1068 < _size1064; ++_i1068) + uint32_t _size1084; + ::apache::thrift::protocol::TType _etype1087; + xfer += iprot->readListBegin(_etype1087, _size1084); + (*(this->success)).resize(_size1084); + uint32_t _i1088; + for (_i1088 = 0; _i1088 < _size1084; ++_i1088) { - xfer += (*(this->success))[_i1068].read(iprot); + xfer += (*(this->success))[_i1088].read(iprot); } xfer += iprot->readListEnd(); } @@ -4518,14 +4518,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size1069; - ::apache::thrift::protocol::TType _etype1072; - xfer += iprot->readListBegin(_etype1072, _size1069); - this->primaryKeys.resize(_size1069); - uint32_t _i1073; - for (_i1073 = 0; _i1073 < _size1069; ++_i1073) + uint32_t _size1089; + ::apache::thrift::protocol::TType _etype1092; + xfer += iprot->readListBegin(_etype1092, _size1089); + this->primaryKeys.resize(_size1089); + uint32_t _i1093; + for (_i1093 = 0; _i1093 < _size1089; ++_i1093) { - xfer += this->primaryKeys[_i1073].read(iprot); + xfer += this->primaryKeys[_i1093].read(iprot); } xfer += iprot->readListEnd(); } @@ -4538,14 +4538,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t 
_size1074; - ::apache::thrift::protocol::TType _etype1077; - xfer += iprot->readListBegin(_etype1077, _size1074); - this->foreignKeys.resize(_size1074); - uint32_t _i1078; - for (_i1078 = 0; _i1078 < _size1074; ++_i1078) + uint32_t _size1094; + ::apache::thrift::protocol::TType _etype1097; + xfer += iprot->readListBegin(_etype1097, _size1094); + this->foreignKeys.resize(_size1094); + uint32_t _i1098; + for (_i1098 = 0; _i1098 < _size1094; ++_i1098) { - xfer += this->foreignKeys[_i1078].read(iprot); + xfer += this->foreignKeys[_i1098].read(iprot); } xfer += iprot->readListEnd(); } @@ -4558,14 +4558,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraints.clear(); - uint32_t _size1079; - ::apache::thrift::protocol::TType _etype1082; - xfer += iprot->readListBegin(_etype1082, _size1079); - this->uniqueConstraints.resize(_size1079); - uint32_t _i1083; - for (_i1083 = 0; _i1083 < _size1079; ++_i1083) + uint32_t _size1099; + ::apache::thrift::protocol::TType _etype1102; + xfer += iprot->readListBegin(_etype1102, _size1099); + this->uniqueConstraints.resize(_size1099); + uint32_t _i1103; + for (_i1103 = 0; _i1103 < _size1099; ++_i1103) { - xfer += this->uniqueConstraints[_i1083].read(iprot); + xfer += this->uniqueConstraints[_i1103].read(iprot); } xfer += iprot->readListEnd(); } @@ -4578,14 +4578,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->notNullConstraints.clear(); - uint32_t _size1084; - ::apache::thrift::protocol::TType _etype1087; - xfer += iprot->readListBegin(_etype1087, _size1084); - this->notNullConstraints.resize(_size1084); - uint32_t _i1088; - for (_i1088 = 0; _i1088 < _size1084; ++_i1088) + uint32_t _size1104; + ::apache::thrift::protocol::TType _etype1107; + xfer += iprot->readListBegin(_etype1107, _size1104); + this->notNullConstraints.resize(_size1104); + uint32_t _i1108; + for (_i1108 = 0; _i1108 < _size1104; ++_i1108) { - xfer += this->notNullConstraints[_i1088].read(iprot); + xfer += this->notNullConstraints[_i1108].read(iprot); } xfer += iprot->readListEnd(); } @@ -4618,10 +4618,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter1089; - for (_iter1089 = this->primaryKeys.begin(); _iter1089 != this->primaryKeys.end(); ++_iter1089) + std::vector ::const_iterator _iter1109; + for (_iter1109 = this->primaryKeys.begin(); _iter1109 != this->primaryKeys.end(); ++_iter1109) { - xfer += (*_iter1089).write(oprot); + xfer += (*_iter1109).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4630,10 +4630,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter1090; - for (_iter1090 = this->foreignKeys.begin(); _iter1090 != this->foreignKeys.end(); ++_iter1090) + std::vector ::const_iterator _iter1110; + for (_iter1110 = this->foreignKeys.begin(); _iter1110 != this->foreignKeys.end(); ++_iter1110) { - xfer += (*_iter1090).write(oprot); + xfer += 
(*_iter1110).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4642,10 +4642,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraints.size())); - std::vector ::const_iterator _iter1091; - for (_iter1091 = this->uniqueConstraints.begin(); _iter1091 != this->uniqueConstraints.end(); ++_iter1091) + std::vector ::const_iterator _iter1111; + for (_iter1111 = this->uniqueConstraints.begin(); _iter1111 != this->uniqueConstraints.end(); ++_iter1111) { - xfer += (*_iter1091).write(oprot); + xfer += (*_iter1111).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4654,10 +4654,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraints.size())); - std::vector ::const_iterator _iter1092; - for (_iter1092 = this->notNullConstraints.begin(); _iter1092 != this->notNullConstraints.end(); ++_iter1092) + std::vector ::const_iterator _iter1112; + for (_iter1112 = this->notNullConstraints.begin(); _iter1112 != this->notNullConstraints.end(); ++_iter1112) { - xfer += (*_iter1092).write(oprot); + xfer += (*_iter1112).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4685,10 +4685,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->primaryKeys)).size())); - std::vector ::const_iterator _iter1093; - for (_iter1093 = (*(this->primaryKeys)).begin(); _iter1093 != (*(this->primaryKeys)).end(); ++_iter1093) + std::vector ::const_iterator _iter1113; + for (_iter1113 = (*(this->primaryKeys)).begin(); _iter1113 != (*(this->primaryKeys)).end(); ++_iter1113) { - xfer += (*_iter1093).write(oprot); + xfer += (*_iter1113).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4697,10 +4697,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->foreignKeys)).size())); - std::vector ::const_iterator _iter1094; - for (_iter1094 = (*(this->foreignKeys)).begin(); _iter1094 != (*(this->foreignKeys)).end(); ++_iter1094) + std::vector ::const_iterator _iter1114; + for (_iter1114 = (*(this->foreignKeys)).begin(); _iter1114 != (*(this->foreignKeys)).end(); ++_iter1114) { - xfer += (*_iter1094).write(oprot); + xfer += (*_iter1114).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4709,10 +4709,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->uniqueConstraints)).size())); - std::vector ::const_iterator _iter1095; - for (_iter1095 = (*(this->uniqueConstraints)).begin(); _iter1095 != (*(this->uniqueConstraints)).end(); ++_iter1095) + std::vector ::const_iterator _iter1115; + for (_iter1115 = (*(this->uniqueConstraints)).begin(); _iter1115 != 
(*(this->uniqueConstraints)).end(); ++_iter1115) { - xfer += (*_iter1095).write(oprot); + xfer += (*_iter1115).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4721,10 +4721,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->notNullConstraints)).size())); - std::vector ::const_iterator _iter1096; - for (_iter1096 = (*(this->notNullConstraints)).begin(); _iter1096 != (*(this->notNullConstraints)).end(); ++_iter1096) + std::vector ::const_iterator _iter1116; + for (_iter1116 = (*(this->notNullConstraints)).begin(); _iter1116 != (*(this->notNullConstraints)).end(); ++_iter1116) { - xfer += (*_iter1096).write(oprot); + xfer += (*_iter1116).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6478,14 +6478,14 @@ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size1097; - ::apache::thrift::protocol::TType _etype1100; - xfer += iprot->readListBegin(_etype1100, _size1097); - this->partNames.resize(_size1097); - uint32_t _i1101; - for (_i1101 = 0; _i1101 < _size1097; ++_i1101) + uint32_t _size1117; + ::apache::thrift::protocol::TType _etype1120; + xfer += iprot->readListBegin(_etype1120, _size1117); + this->partNames.resize(_size1117); + uint32_t _i1121; + for (_i1121 = 0; _i1121 < _size1117; ++_i1121) { - xfer += iprot->readString(this->partNames[_i1101]); + xfer += iprot->readString(this->partNames[_i1121]); } xfer += iprot->readListEnd(); } @@ -6522,10 +6522,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); - std::vector ::const_iterator _iter1102; - for (_iter1102 = this->partNames.begin(); _iter1102 != this->partNames.end(); ++_iter1102) + std::vector ::const_iterator _iter1122; + for (_iter1122 = this->partNames.begin(); _iter1122 != this->partNames.end(); ++_iter1122) { - xfer += oprot->writeString((*_iter1102)); + xfer += oprot->writeString((*_iter1122)); } xfer += oprot->writeListEnd(); } @@ -6557,10 +6557,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->partNames)).size())); - std::vector ::const_iterator _iter1103; - for (_iter1103 = (*(this->partNames)).begin(); _iter1103 != (*(this->partNames)).end(); ++_iter1103) + std::vector ::const_iterator _iter1123; + for (_iter1123 = (*(this->partNames)).begin(); _iter1123 != (*(this->partNames)).end(); ++_iter1123) { - xfer += oprot->writeString((*_iter1103)); + xfer += oprot->writeString((*_iter1123)); } xfer += oprot->writeListEnd(); } @@ -6804,285 +6804,285 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1104; - ::apache::thrift::protocol::TType _etype1107; - xfer += iprot->readListBegin(_etype1107, _size1104); - this->success.resize(_size1104); - uint32_t _i1108; - for (_i1108 = 0; _i1108 < _size1104; ++_i1108) - { - xfer += 
iprot->readString(this->success[_i1108]); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_result"); - - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1109; - for (_iter1109 = this->success.begin(); _iter1109 != this->success.end(); ++_iter1109) - { - xfer += oprot->writeString((*_iter1109)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_get_tables_presult::~ThriftHiveMetastore_get_tables_presult() throw() { -} - - -uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1110; - ::apache::thrift::protocol::TType _etype1113; - xfer += iprot->readListBegin(_etype1113, _size1110); - (*(this->success)).resize(_size1110); - uint32_t _i1114; - for (_i1114 = 0; _i1114 < _size1110; ++_i1114) - { - xfer += iprot->readString((*(this->success))[_i1114]); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - - -ThriftHiveMetastore_get_tables_by_type_args::~ThriftHiveMetastore_get_tables_by_type_args() throw() { -} - - -uint32_t ThriftHiveMetastore_get_tables_by_type_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == 
::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->db_name); - this->__isset.db_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->pattern); - this->__isset.pattern = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->tableType); - this->__isset.tableType = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t ThriftHiveMetastore_get_tables_by_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_by_type_args"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->db_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->pattern); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tableType", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString(this->tableType); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_get_tables_by_type_pargs::~ThriftHiveMetastore_get_tables_by_type_pargs() throw() { -} - - -uint32_t ThriftHiveMetastore_get_tables_by_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_by_type_pargs"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->db_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->pattern))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tableType", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString((*(this->tableType))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_get_tables_by_type_result::~ThriftHiveMetastore_get_tables_by_type_result() throw() { -} - - -uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1115; - ::apache::thrift::protocol::TType _etype1118; - xfer += 
iprot->readListBegin(_etype1118, _size1115); - this->success.resize(_size1115); - uint32_t _i1119; - for (_i1119 = 0; _i1119 < _size1115; ++_i1119) + uint32_t _size1124; + ::apache::thrift::protocol::TType _etype1127; + xfer += iprot->readListBegin(_etype1127, _size1124); + this->success.resize(_size1124); + uint32_t _i1128; + for (_i1128 = 0; _i1128 < _size1124; ++_i1128) + { + xfer += iprot->readString(this->success[_i1128]); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); + std::vector ::const_iterator _iter1129; + for (_iter1129 = this->success.begin(); _iter1129 != this->success.end(); ++_iter1129) + { + xfer += oprot->writeString((*_iter1129)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_tables_presult::~ThriftHiveMetastore_get_tables_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1130; + ::apache::thrift::protocol::TType _etype1133; + xfer += iprot->readListBegin(_etype1133, _size1130); + (*(this->success)).resize(_size1130); + uint32_t _i1134; + for (_i1134 = 0; _i1134 < _size1130; ++_i1134) + { + xfer += iprot->readString((*(this->success))[_i1134]); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_tables_by_type_args::~ThriftHiveMetastore_get_tables_by_type_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_tables_by_type_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + 
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->pattern); + this->__isset.pattern = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tableType); + this->__isset.tableType = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_tables_by_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_by_type_args"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->pattern); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tableType", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->tableType); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_tables_by_type_pargs::~ThriftHiveMetastore_get_tables_by_type_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_tables_by_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_by_type_pargs"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->pattern))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tableType", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString((*(this->tableType))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_tables_by_type_result::~ThriftHiveMetastore_get_tables_by_type_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using 
::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1135; + ::apache::thrift::protocol::TType _etype1138; + xfer += iprot->readListBegin(_etype1138, _size1135); + this->success.resize(_size1135); + uint32_t _i1139; + for (_i1139 = 0; _i1139 < _size1135; ++_i1139) { - xfer += iprot->readString(this->success[_i1119]); + xfer += iprot->readString(this->success[_i1139]); } xfer += iprot->readListEnd(); } @@ -7121,10 +7121,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift:: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1120; - for (_iter1120 = this->success.begin(); _iter1120 != this->success.end(); ++_iter1120) + std::vector ::const_iterator _iter1140; + for (_iter1140 = this->success.begin(); _iter1140 != this->success.end(); ++_iter1140) { - xfer += oprot->writeString((*_iter1120)); + xfer += oprot->writeString((*_iter1140)); } xfer += oprot->writeListEnd(); } @@ -7169,14 +7169,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1121; - ::apache::thrift::protocol::TType _etype1124; - xfer += iprot->readListBegin(_etype1124, _size1121); - (*(this->success)).resize(_size1121); - uint32_t _i1125; - for (_i1125 = 0; _i1125 < _size1121; ++_i1125) + uint32_t _size1141; + ::apache::thrift::protocol::TType _etype1144; + xfer += iprot->readListBegin(_etype1144, _size1141); + (*(this->success)).resize(_size1141); + uint32_t _i1145; + for (_i1145 = 0; _i1145 < _size1141; ++_i1145) { - xfer += iprot->readString((*(this->success))[_i1125]); + xfer += iprot->readString((*(this->success))[_i1145]); } xfer += iprot->readListEnd(); } @@ -7251,14 +7251,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_types.clear(); - uint32_t _size1126; - ::apache::thrift::protocol::TType _etype1129; - xfer += iprot->readListBegin(_etype1129, _size1126); - this->tbl_types.resize(_size1126); - uint32_t _i1130; - for (_i1130 = 0; _i1130 < _size1126; ++_i1130) + uint32_t _size1146; + ::apache::thrift::protocol::TType _etype1149; + xfer += iprot->readListBegin(_etype1149, _size1146); + this->tbl_types.resize(_size1146); + uint32_t _i1150; + for (_i1150 = 0; _i1150 < _size1146; ++_i1150) { - xfer += iprot->readString(this->tbl_types[_i1130]); + xfer += iprot->readString(this->tbl_types[_i1150]); } xfer += iprot->readListEnd(); } @@ -7295,10 +7295,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_types.size())); - std::vector ::const_iterator _iter1131; - for (_iter1131 = this->tbl_types.begin(); _iter1131 != this->tbl_types.end(); ++_iter1131) + std::vector ::const_iterator _iter1151; + for (_iter1151 = this->tbl_types.begin(); _iter1151 != this->tbl_types.end(); ++_iter1151) { - xfer += oprot->writeString((*_iter1131)); + 
xfer += oprot->writeString((*_iter1151)); } xfer += oprot->writeListEnd(); } @@ -7330,10 +7330,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_types)).size())); - std::vector ::const_iterator _iter1132; - for (_iter1132 = (*(this->tbl_types)).begin(); _iter1132 != (*(this->tbl_types)).end(); ++_iter1132) + std::vector ::const_iterator _iter1152; + for (_iter1152 = (*(this->tbl_types)).begin(); _iter1152 != (*(this->tbl_types)).end(); ++_iter1152) { - xfer += oprot->writeString((*_iter1132)); + xfer += oprot->writeString((*_iter1152)); } xfer += oprot->writeListEnd(); } @@ -7374,14 +7374,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1133; - ::apache::thrift::protocol::TType _etype1136; - xfer += iprot->readListBegin(_etype1136, _size1133); - this->success.resize(_size1133); - uint32_t _i1137; - for (_i1137 = 0; _i1137 < _size1133; ++_i1137) + uint32_t _size1153; + ::apache::thrift::protocol::TType _etype1156; + xfer += iprot->readListBegin(_etype1156, _size1153); + this->success.resize(_size1153); + uint32_t _i1157; + for (_i1157 = 0; _i1157 < _size1153; ++_i1157) { - xfer += this->success[_i1137].read(iprot); + xfer += this->success[_i1157].read(iprot); } xfer += iprot->readListEnd(); } @@ -7420,10 +7420,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1138; - for (_iter1138 = this->success.begin(); _iter1138 != this->success.end(); ++_iter1138) + std::vector ::const_iterator _iter1158; + for (_iter1158 = this->success.begin(); _iter1158 != this->success.end(); ++_iter1158) { - xfer += (*_iter1138).write(oprot); + xfer += (*_iter1158).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7468,14 +7468,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1139; - ::apache::thrift::protocol::TType _etype1142; - xfer += iprot->readListBegin(_etype1142, _size1139); - (*(this->success)).resize(_size1139); - uint32_t _i1143; - for (_i1143 = 0; _i1143 < _size1139; ++_i1143) + uint32_t _size1159; + ::apache::thrift::protocol::TType _etype1162; + xfer += iprot->readListBegin(_etype1162, _size1159); + (*(this->success)).resize(_size1159); + uint32_t _i1163; + for (_i1163 = 0; _i1163 < _size1159; ++_i1163) { - xfer += (*(this->success))[_i1143].read(iprot); + xfer += (*(this->success))[_i1163].read(iprot); } xfer += iprot->readListEnd(); } @@ -7613,14 +7613,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1144; - ::apache::thrift::protocol::TType _etype1147; - xfer += iprot->readListBegin(_etype1147, _size1144); - this->success.resize(_size1144); - uint32_t _i1148; - for (_i1148 = 0; _i1148 < _size1144; ++_i1148) + uint32_t _size1164; + ::apache::thrift::protocol::TType _etype1167; + xfer += iprot->readListBegin(_etype1167, _size1164); + 
this->success.resize(_size1164); + uint32_t _i1168; + for (_i1168 = 0; _i1168 < _size1164; ++_i1168) { - xfer += iprot->readString(this->success[_i1148]); + xfer += iprot->readString(this->success[_i1168]); } xfer += iprot->readListEnd(); } @@ -7659,10 +7659,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1149; - for (_iter1149 = this->success.begin(); _iter1149 != this->success.end(); ++_iter1149) + std::vector ::const_iterator _iter1169; + for (_iter1169 = this->success.begin(); _iter1169 != this->success.end(); ++_iter1169) { - xfer += oprot->writeString((*_iter1149)); + xfer += oprot->writeString((*_iter1169)); } xfer += oprot->writeListEnd(); } @@ -7707,14 +7707,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1150; - ::apache::thrift::protocol::TType _etype1153; - xfer += iprot->readListBegin(_etype1153, _size1150); - (*(this->success)).resize(_size1150); - uint32_t _i1154; - for (_i1154 = 0; _i1154 < _size1150; ++_i1154) + uint32_t _size1170; + ::apache::thrift::protocol::TType _etype1173; + xfer += iprot->readListBegin(_etype1173, _size1170); + (*(this->success)).resize(_size1170); + uint32_t _i1174; + for (_i1174 = 0; _i1174 < _size1170; ++_i1174) { - xfer += iprot->readString((*(this->success))[_i1154]); + xfer += iprot->readString((*(this->success))[_i1174]); } xfer += iprot->readListEnd(); } @@ -8024,14 +8024,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size1155; - ::apache::thrift::protocol::TType _etype1158; - xfer += iprot->readListBegin(_etype1158, _size1155); - this->tbl_names.resize(_size1155); - uint32_t _i1159; - for (_i1159 = 0; _i1159 < _size1155; ++_i1159) + uint32_t _size1175; + ::apache::thrift::protocol::TType _etype1178; + xfer += iprot->readListBegin(_etype1178, _size1175); + this->tbl_names.resize(_size1175); + uint32_t _i1179; + for (_i1179 = 0; _i1179 < _size1175; ++_i1179) { - xfer += iprot->readString(this->tbl_names[_i1159]); + xfer += iprot->readString(this->tbl_names[_i1179]); } xfer += iprot->readListEnd(); } @@ -8064,10 +8064,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter1160; - for (_iter1160 = this->tbl_names.begin(); _iter1160 != this->tbl_names.end(); ++_iter1160) + std::vector ::const_iterator _iter1180; + for (_iter1180 = this->tbl_names.begin(); _iter1180 != this->tbl_names.end(); ++_iter1180) { - xfer += oprot->writeString((*_iter1160)); + xfer += oprot->writeString((*_iter1180)); } xfer += oprot->writeListEnd(); } @@ -8095,10 +8095,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter1161; - for 
(_iter1161 = (*(this->tbl_names)).begin(); _iter1161 != (*(this->tbl_names)).end(); ++_iter1161) + std::vector ::const_iterator _iter1181; + for (_iter1181 = (*(this->tbl_names)).begin(); _iter1181 != (*(this->tbl_names)).end(); ++_iter1181) { - xfer += oprot->writeString((*_iter1161)); + xfer += oprot->writeString((*_iter1181)); } xfer += oprot->writeListEnd(); } @@ -8139,14 +8139,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1162; - ::apache::thrift::protocol::TType _etype1165; - xfer += iprot->readListBegin(_etype1165, _size1162); - this->success.resize(_size1162); - uint32_t _i1166; - for (_i1166 = 0; _i1166 < _size1162; ++_i1166) + uint32_t _size1182; + ::apache::thrift::protocol::TType _etype1185; + xfer += iprot->readListBegin(_etype1185, _size1182); + this->success.resize(_size1182); + uint32_t _i1186; + for (_i1186 = 0; _i1186 < _size1182; ++_i1186) { - xfer += this->success[_i1166].read(iprot); + xfer += this->success[_i1186].read(iprot); } xfer += iprot->readListEnd(); } @@ -8177,10 +8177,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1167; - for (_iter1167 = this->success.begin(); _iter1167 != this->success.end(); ++_iter1167) + std::vector
::const_iterator _iter1187; + for (_iter1187 = this->success.begin(); _iter1187 != this->success.end(); ++_iter1187) { - xfer += (*_iter1167).write(oprot); + xfer += (*_iter1187).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8221,14 +8221,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1168; - ::apache::thrift::protocol::TType _etype1171; - xfer += iprot->readListBegin(_etype1171, _size1168); - (*(this->success)).resize(_size1168); - uint32_t _i1172; - for (_i1172 = 0; _i1172 < _size1168; ++_i1172) + uint32_t _size1188; + ::apache::thrift::protocol::TType _etype1191; + xfer += iprot->readListBegin(_etype1191, _size1188); + (*(this->success)).resize(_size1188); + uint32_t _i1192; + for (_i1192 = 0; _i1192 < _size1188; ++_i1192) { - xfer += (*(this->success))[_i1172].read(iprot); + xfer += (*(this->success))[_i1192].read(iprot); } xfer += iprot->readListEnd(); } @@ -8864,14 +8864,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1173; - ::apache::thrift::protocol::TType _etype1176; - xfer += iprot->readListBegin(_etype1176, _size1173); - this->success.resize(_size1173); - uint32_t _i1177; - for (_i1177 = 0; _i1177 < _size1173; ++_i1177) + uint32_t _size1193; + ::apache::thrift::protocol::TType _etype1196; + xfer += iprot->readListBegin(_etype1196, _size1193); + this->success.resize(_size1193); + uint32_t _i1197; + for (_i1197 = 0; _i1197 < _size1193; ++_i1197) { - xfer += iprot->readString(this->success[_i1177]); + xfer += iprot->readString(this->success[_i1197]); } xfer += iprot->readListEnd(); } @@ -8926,10 +8926,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1178; - for (_iter1178 = this->success.begin(); _iter1178 != this->success.end(); ++_iter1178) + std::vector ::const_iterator _iter1198; + for (_iter1198 = this->success.begin(); _iter1198 != this->success.end(); ++_iter1198) { - xfer += oprot->writeString((*_iter1178)); + xfer += oprot->writeString((*_iter1198)); } xfer += oprot->writeListEnd(); } @@ -8982,14 +8982,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1179; - ::apache::thrift::protocol::TType _etype1182; - xfer += iprot->readListBegin(_etype1182, _size1179); - (*(this->success)).resize(_size1179); - uint32_t _i1183; - for (_i1183 = 0; _i1183 < _size1179; ++_i1183) + uint32_t _size1199; + ::apache::thrift::protocol::TType _etype1202; + xfer += iprot->readListBegin(_etype1202, _size1199); + (*(this->success)).resize(_size1199); + uint32_t _i1203; + for (_i1203 = 0; _i1203 < _size1199; ++_i1203) { - xfer += iprot->readString((*(this->success))[_i1183]); + xfer += iprot->readString((*(this->success))[_i1203]); } xfer += iprot->readListEnd(); } @@ -10323,14 +10323,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1184; - ::apache::thrift::protocol::TType _etype1187; - xfer += 
iprot->readListBegin(_etype1187, _size1184); - this->new_parts.resize(_size1184); - uint32_t _i1188; - for (_i1188 = 0; _i1188 < _size1184; ++_i1188) + uint32_t _size1204; + ::apache::thrift::protocol::TType _etype1207; + xfer += iprot->readListBegin(_etype1207, _size1204); + this->new_parts.resize(_size1204); + uint32_t _i1208; + for (_i1208 = 0; _i1208 < _size1204; ++_i1208) { - xfer += this->new_parts[_i1188].read(iprot); + xfer += this->new_parts[_i1208].read(iprot); } xfer += iprot->readListEnd(); } @@ -10359,10 +10359,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1189; - for (_iter1189 = this->new_parts.begin(); _iter1189 != this->new_parts.end(); ++_iter1189) + std::vector ::const_iterator _iter1209; + for (_iter1209 = this->new_parts.begin(); _iter1209 != this->new_parts.end(); ++_iter1209) { - xfer += (*_iter1189).write(oprot); + xfer += (*_iter1209).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10386,10 +10386,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1190; - for (_iter1190 = (*(this->new_parts)).begin(); _iter1190 != (*(this->new_parts)).end(); ++_iter1190) + std::vector ::const_iterator _iter1210; + for (_iter1210 = (*(this->new_parts)).begin(); _iter1210 != (*(this->new_parts)).end(); ++_iter1210) { - xfer += (*_iter1190).write(oprot); + xfer += (*_iter1210).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10598,14 +10598,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1191; - ::apache::thrift::protocol::TType _etype1194; - xfer += iprot->readListBegin(_etype1194, _size1191); - this->new_parts.resize(_size1191); - uint32_t _i1195; - for (_i1195 = 0; _i1195 < _size1191; ++_i1195) + uint32_t _size1211; + ::apache::thrift::protocol::TType _etype1214; + xfer += iprot->readListBegin(_etype1214, _size1211); + this->new_parts.resize(_size1211); + uint32_t _i1215; + for (_i1215 = 0; _i1215 < _size1211; ++_i1215) { - xfer += this->new_parts[_i1195].read(iprot); + xfer += this->new_parts[_i1215].read(iprot); } xfer += iprot->readListEnd(); } @@ -10634,10 +10634,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift:: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1196; - for (_iter1196 = this->new_parts.begin(); _iter1196 != this->new_parts.end(); ++_iter1196) + std::vector ::const_iterator _iter1216; + for (_iter1216 = this->new_parts.begin(); _iter1216 != this->new_parts.end(); ++_iter1216) { - xfer += (*_iter1196).write(oprot); + xfer += (*_iter1216).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10661,10 +10661,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1197; - for (_iter1197 = (*(this->new_parts)).begin(); _iter1197 != (*(this->new_parts)).end(); ++_iter1197) + std::vector ::const_iterator _iter1217; + for (_iter1217 = (*(this->new_parts)).begin(); _iter1217 != (*(this->new_parts)).end(); ++_iter1217) { - xfer += (*_iter1197).write(oprot); + xfer += (*_iter1217).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10889,14 +10889,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1198; - ::apache::thrift::protocol::TType _etype1201; - xfer += iprot->readListBegin(_etype1201, _size1198); - this->part_vals.resize(_size1198); - uint32_t _i1202; - for (_i1202 = 0; _i1202 < _size1198; ++_i1202) + uint32_t _size1218; + ::apache::thrift::protocol::TType _etype1221; + xfer += iprot->readListBegin(_etype1221, _size1218); + this->part_vals.resize(_size1218); + uint32_t _i1222; + for (_i1222 = 0; _i1222 < _size1218; ++_i1222) { - xfer += iprot->readString(this->part_vals[_i1202]); + xfer += iprot->readString(this->part_vals[_i1222]); } xfer += iprot->readListEnd(); } @@ -10933,10 +10933,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1203; - for (_iter1203 = this->part_vals.begin(); _iter1203 != this->part_vals.end(); ++_iter1203) + std::vector ::const_iterator _iter1223; + for (_iter1223 = this->part_vals.begin(); _iter1223 != this->part_vals.end(); ++_iter1223) { - xfer += oprot->writeString((*_iter1203)); + xfer += oprot->writeString((*_iter1223)); } xfer += oprot->writeListEnd(); } @@ -10968,10 +10968,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1204; - for (_iter1204 = (*(this->part_vals)).begin(); _iter1204 != (*(this->part_vals)).end(); ++_iter1204) + std::vector ::const_iterator _iter1224; + for (_iter1224 = (*(this->part_vals)).begin(); _iter1224 != (*(this->part_vals)).end(); ++_iter1224) { - xfer += oprot->writeString((*_iter1204)); + xfer += oprot->writeString((*_iter1224)); } xfer += oprot->writeListEnd(); } @@ -11443,14 +11443,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1205; - ::apache::thrift::protocol::TType _etype1208; - xfer += iprot->readListBegin(_etype1208, _size1205); - this->part_vals.resize(_size1205); - uint32_t _i1209; - for (_i1209 = 0; _i1209 < _size1205; ++_i1209) + uint32_t _size1225; + ::apache::thrift::protocol::TType _etype1228; + xfer += iprot->readListBegin(_etype1228, _size1225); + this->part_vals.resize(_size1225); + uint32_t _i1229; + for (_i1229 = 0; _i1229 < _size1225; ++_i1229) { - xfer += iprot->readString(this->part_vals[_i1209]); + xfer += iprot->readString(this->part_vals[_i1229]); } xfer += iprot->readListEnd(); } @@ -11495,10 +11495,10 @@ uint32_t 
ThriftHiveMetastore_append_partition_with_environment_context_args::wri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1210; - for (_iter1210 = this->part_vals.begin(); _iter1210 != this->part_vals.end(); ++_iter1210) + std::vector ::const_iterator _iter1230; + for (_iter1230 = this->part_vals.begin(); _iter1230 != this->part_vals.end(); ++_iter1230) { - xfer += oprot->writeString((*_iter1210)); + xfer += oprot->writeString((*_iter1230)); } xfer += oprot->writeListEnd(); } @@ -11534,10 +11534,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1211; - for (_iter1211 = (*(this->part_vals)).begin(); _iter1211 != (*(this->part_vals)).end(); ++_iter1211) + std::vector ::const_iterator _iter1231; + for (_iter1231 = (*(this->part_vals)).begin(); _iter1231 != (*(this->part_vals)).end(); ++_iter1231) { - xfer += oprot->writeString((*_iter1211)); + xfer += oprot->writeString((*_iter1231)); } xfer += oprot->writeListEnd(); } @@ -12340,14 +12340,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1212; - ::apache::thrift::protocol::TType _etype1215; - xfer += iprot->readListBegin(_etype1215, _size1212); - this->part_vals.resize(_size1212); - uint32_t _i1216; - for (_i1216 = 0; _i1216 < _size1212; ++_i1216) + uint32_t _size1232; + ::apache::thrift::protocol::TType _etype1235; + xfer += iprot->readListBegin(_etype1235, _size1232); + this->part_vals.resize(_size1232); + uint32_t _i1236; + for (_i1236 = 0; _i1236 < _size1232; ++_i1236) { - xfer += iprot->readString(this->part_vals[_i1216]); + xfer += iprot->readString(this->part_vals[_i1236]); } xfer += iprot->readListEnd(); } @@ -12392,10 +12392,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1217; - for (_iter1217 = this->part_vals.begin(); _iter1217 != this->part_vals.end(); ++_iter1217) + std::vector ::const_iterator _iter1237; + for (_iter1237 = this->part_vals.begin(); _iter1237 != this->part_vals.end(); ++_iter1237) { - xfer += oprot->writeString((*_iter1217)); + xfer += oprot->writeString((*_iter1237)); } xfer += oprot->writeListEnd(); } @@ -12431,10 +12431,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1218; - for (_iter1218 = (*(this->part_vals)).begin(); _iter1218 != (*(this->part_vals)).end(); ++_iter1218) + std::vector ::const_iterator _iter1238; + for (_iter1238 = (*(this->part_vals)).begin(); _iter1238 != (*(this->part_vals)).end(); ++_iter1238) { - xfer += oprot->writeString((*_iter1218)); + xfer += oprot->writeString((*_iter1238)); } xfer += 
oprot->writeListEnd(); } @@ -12643,14 +12643,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1219; - ::apache::thrift::protocol::TType _etype1222; - xfer += iprot->readListBegin(_etype1222, _size1219); - this->part_vals.resize(_size1219); - uint32_t _i1223; - for (_i1223 = 0; _i1223 < _size1219; ++_i1223) + uint32_t _size1239; + ::apache::thrift::protocol::TType _etype1242; + xfer += iprot->readListBegin(_etype1242, _size1239); + this->part_vals.resize(_size1239); + uint32_t _i1243; + for (_i1243 = 0; _i1243 < _size1239; ++_i1243) { - xfer += iprot->readString(this->part_vals[_i1223]); + xfer += iprot->readString(this->part_vals[_i1243]); } xfer += iprot->readListEnd(); } @@ -12703,10 +12703,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1224; - for (_iter1224 = this->part_vals.begin(); _iter1224 != this->part_vals.end(); ++_iter1224) + std::vector ::const_iterator _iter1244; + for (_iter1244 = this->part_vals.begin(); _iter1244 != this->part_vals.end(); ++_iter1244) { - xfer += oprot->writeString((*_iter1224)); + xfer += oprot->writeString((*_iter1244)); } xfer += oprot->writeListEnd(); } @@ -12746,10 +12746,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1225; - for (_iter1225 = (*(this->part_vals)).begin(); _iter1225 != (*(this->part_vals)).end(); ++_iter1225) + std::vector ::const_iterator _iter1245; + for (_iter1245 = (*(this->part_vals)).begin(); _iter1245 != (*(this->part_vals)).end(); ++_iter1245) { - xfer += oprot->writeString((*_iter1225)); + xfer += oprot->writeString((*_iter1245)); } xfer += oprot->writeListEnd(); } @@ -13755,14 +13755,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1226; - ::apache::thrift::protocol::TType _etype1229; - xfer += iprot->readListBegin(_etype1229, _size1226); - this->part_vals.resize(_size1226); - uint32_t _i1230; - for (_i1230 = 0; _i1230 < _size1226; ++_i1230) + uint32_t _size1246; + ::apache::thrift::protocol::TType _etype1249; + xfer += iprot->readListBegin(_etype1249, _size1246); + this->part_vals.resize(_size1246); + uint32_t _i1250; + for (_i1250 = 0; _i1250 < _size1246; ++_i1250) { - xfer += iprot->readString(this->part_vals[_i1230]); + xfer += iprot->readString(this->part_vals[_i1250]); } xfer += iprot->readListEnd(); } @@ -13799,10 +13799,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1231; - for (_iter1231 = this->part_vals.begin(); _iter1231 != this->part_vals.end(); ++_iter1231) + std::vector ::const_iterator _iter1251; + for (_iter1251 = this->part_vals.begin(); _iter1251 != 
this->part_vals.end(); ++_iter1251) { - xfer += oprot->writeString((*_iter1231)); + xfer += oprot->writeString((*_iter1251)); } xfer += oprot->writeListEnd(); } @@ -13834,10 +13834,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1232; - for (_iter1232 = (*(this->part_vals)).begin(); _iter1232 != (*(this->part_vals)).end(); ++_iter1232) + std::vector ::const_iterator _iter1252; + for (_iter1252 = (*(this->part_vals)).begin(); _iter1252 != (*(this->part_vals)).end(); ++_iter1252) { - xfer += oprot->writeString((*_iter1232)); + xfer += oprot->writeString((*_iter1252)); } xfer += oprot->writeListEnd(); } @@ -14026,17 +14026,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1233; - ::apache::thrift::protocol::TType _ktype1234; - ::apache::thrift::protocol::TType _vtype1235; - xfer += iprot->readMapBegin(_ktype1234, _vtype1235, _size1233); - uint32_t _i1237; - for (_i1237 = 0; _i1237 < _size1233; ++_i1237) + uint32_t _size1253; + ::apache::thrift::protocol::TType _ktype1254; + ::apache::thrift::protocol::TType _vtype1255; + xfer += iprot->readMapBegin(_ktype1254, _vtype1255, _size1253); + uint32_t _i1257; + for (_i1257 = 0; _i1257 < _size1253; ++_i1257) { - std::string _key1238; - xfer += iprot->readString(_key1238); - std::string& _val1239 = this->partitionSpecs[_key1238]; - xfer += iprot->readString(_val1239); + std::string _key1258; + xfer += iprot->readString(_key1258); + std::string& _val1259 = this->partitionSpecs[_key1258]; + xfer += iprot->readString(_val1259); } xfer += iprot->readMapEnd(); } @@ -14097,11 +14097,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1240; - for (_iter1240 = this->partitionSpecs.begin(); _iter1240 != this->partitionSpecs.end(); ++_iter1240) + std::map ::const_iterator _iter1260; + for (_iter1260 = this->partitionSpecs.begin(); _iter1260 != this->partitionSpecs.end(); ++_iter1260) { - xfer += oprot->writeString(_iter1240->first); - xfer += oprot->writeString(_iter1240->second); + xfer += oprot->writeString(_iter1260->first); + xfer += oprot->writeString(_iter1260->second); } xfer += oprot->writeMapEnd(); } @@ -14141,11 +14141,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1241; - for (_iter1241 = (*(this->partitionSpecs)).begin(); _iter1241 != (*(this->partitionSpecs)).end(); ++_iter1241) + std::map ::const_iterator _iter1261; + for (_iter1261 = (*(this->partitionSpecs)).begin(); _iter1261 != (*(this->partitionSpecs)).end(); ++_iter1261) { - xfer += oprot->writeString(_iter1241->first); - xfer += oprot->writeString(_iter1241->second); + xfer += 
oprot->writeString(_iter1261->first); + xfer += oprot->writeString(_iter1261->second); } xfer += oprot->writeMapEnd(); } @@ -14390,17 +14390,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1242; - ::apache::thrift::protocol::TType _ktype1243; - ::apache::thrift::protocol::TType _vtype1244; - xfer += iprot->readMapBegin(_ktype1243, _vtype1244, _size1242); - uint32_t _i1246; - for (_i1246 = 0; _i1246 < _size1242; ++_i1246) + uint32_t _size1262; + ::apache::thrift::protocol::TType _ktype1263; + ::apache::thrift::protocol::TType _vtype1264; + xfer += iprot->readMapBegin(_ktype1263, _vtype1264, _size1262); + uint32_t _i1266; + for (_i1266 = 0; _i1266 < _size1262; ++_i1266) { - std::string _key1247; - xfer += iprot->readString(_key1247); - std::string& _val1248 = this->partitionSpecs[_key1247]; - xfer += iprot->readString(_val1248); + std::string _key1267; + xfer += iprot->readString(_key1267); + std::string& _val1268 = this->partitionSpecs[_key1267]; + xfer += iprot->readString(_val1268); } xfer += iprot->readMapEnd(); } @@ -14461,11 +14461,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1249; - for (_iter1249 = this->partitionSpecs.begin(); _iter1249 != this->partitionSpecs.end(); ++_iter1249) + std::map ::const_iterator _iter1269; + for (_iter1269 = this->partitionSpecs.begin(); _iter1269 != this->partitionSpecs.end(); ++_iter1269) { - xfer += oprot->writeString(_iter1249->first); - xfer += oprot->writeString(_iter1249->second); + xfer += oprot->writeString(_iter1269->first); + xfer += oprot->writeString(_iter1269->second); } xfer += oprot->writeMapEnd(); } @@ -14505,11 +14505,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift:: xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1250; - for (_iter1250 = (*(this->partitionSpecs)).begin(); _iter1250 != (*(this->partitionSpecs)).end(); ++_iter1250) + std::map ::const_iterator _iter1270; + for (_iter1270 = (*(this->partitionSpecs)).begin(); _iter1270 != (*(this->partitionSpecs)).end(); ++_iter1270) { - xfer += oprot->writeString(_iter1250->first); - xfer += oprot->writeString(_iter1250->second); + xfer += oprot->writeString(_iter1270->first); + xfer += oprot->writeString(_iter1270->second); } xfer += oprot->writeMapEnd(); } @@ -14566,14 +14566,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1251; - ::apache::thrift::protocol::TType _etype1254; - xfer += iprot->readListBegin(_etype1254, _size1251); - this->success.resize(_size1251); - uint32_t _i1255; - for (_i1255 = 0; _i1255 < _size1251; ++_i1255) + uint32_t _size1271; + ::apache::thrift::protocol::TType _etype1274; + xfer += iprot->readListBegin(_etype1274, _size1271); + this->success.resize(_size1271); + uint32_t _i1275; + for (_i1275 = 0; _i1275 < _size1271; ++_i1275) { - 
xfer += this->success[_i1255].read(iprot); + xfer += this->success[_i1275].read(iprot); } xfer += iprot->readListEnd(); } @@ -14636,10 +14636,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1256; - for (_iter1256 = this->success.begin(); _iter1256 != this->success.end(); ++_iter1256) + std::vector ::const_iterator _iter1276; + for (_iter1276 = this->success.begin(); _iter1276 != this->success.end(); ++_iter1276) { - xfer += (*_iter1256).write(oprot); + xfer += (*_iter1276).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14696,14 +14696,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1257; - ::apache::thrift::protocol::TType _etype1260; - xfer += iprot->readListBegin(_etype1260, _size1257); - (*(this->success)).resize(_size1257); - uint32_t _i1261; - for (_i1261 = 0; _i1261 < _size1257; ++_i1261) + uint32_t _size1277; + ::apache::thrift::protocol::TType _etype1280; + xfer += iprot->readListBegin(_etype1280, _size1277); + (*(this->success)).resize(_size1277); + uint32_t _i1281; + for (_i1281 = 0; _i1281 < _size1277; ++_i1281) { - xfer += (*(this->success))[_i1261].read(iprot); + xfer += (*(this->success))[_i1281].read(iprot); } xfer += iprot->readListEnd(); } @@ -14802,14 +14802,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1262; - ::apache::thrift::protocol::TType _etype1265; - xfer += iprot->readListBegin(_etype1265, _size1262); - this->part_vals.resize(_size1262); - uint32_t _i1266; - for (_i1266 = 0; _i1266 < _size1262; ++_i1266) + uint32_t _size1282; + ::apache::thrift::protocol::TType _etype1285; + xfer += iprot->readListBegin(_etype1285, _size1282); + this->part_vals.resize(_size1282); + uint32_t _i1286; + for (_i1286 = 0; _i1286 < _size1282; ++_i1286) { - xfer += iprot->readString(this->part_vals[_i1266]); + xfer += iprot->readString(this->part_vals[_i1286]); } xfer += iprot->readListEnd(); } @@ -14830,14 +14830,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1267; - ::apache::thrift::protocol::TType _etype1270; - xfer += iprot->readListBegin(_etype1270, _size1267); - this->group_names.resize(_size1267); - uint32_t _i1271; - for (_i1271 = 0; _i1271 < _size1267; ++_i1271) + uint32_t _size1287; + ::apache::thrift::protocol::TType _etype1290; + xfer += iprot->readListBegin(_etype1290, _size1287); + this->group_names.resize(_size1287); + uint32_t _i1291; + for (_i1291 = 0; _i1291 < _size1287; ++_i1291) { - xfer += iprot->readString(this->group_names[_i1271]); + xfer += iprot->readString(this->group_names[_i1291]); } xfer += iprot->readListEnd(); } @@ -14874,10 +14874,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1272; - for (_iter1272 = this->part_vals.begin(); 
_iter1272 != this->part_vals.end(); ++_iter1272) + std::vector ::const_iterator _iter1292; + for (_iter1292 = this->part_vals.begin(); _iter1292 != this->part_vals.end(); ++_iter1292) { - xfer += oprot->writeString((*_iter1272)); + xfer += oprot->writeString((*_iter1292)); } xfer += oprot->writeListEnd(); } @@ -14890,10 +14890,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1273; - for (_iter1273 = this->group_names.begin(); _iter1273 != this->group_names.end(); ++_iter1273) + std::vector ::const_iterator _iter1293; + for (_iter1293 = this->group_names.begin(); _iter1293 != this->group_names.end(); ++_iter1293) { - xfer += oprot->writeString((*_iter1273)); + xfer += oprot->writeString((*_iter1293)); } xfer += oprot->writeListEnd(); } @@ -14925,10 +14925,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1274; - for (_iter1274 = (*(this->part_vals)).begin(); _iter1274 != (*(this->part_vals)).end(); ++_iter1274) + std::vector ::const_iterator _iter1294; + for (_iter1294 = (*(this->part_vals)).begin(); _iter1294 != (*(this->part_vals)).end(); ++_iter1294) { - xfer += oprot->writeString((*_iter1274)); + xfer += oprot->writeString((*_iter1294)); } xfer += oprot->writeListEnd(); } @@ -14941,10 +14941,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1275; - for (_iter1275 = (*(this->group_names)).begin(); _iter1275 != (*(this->group_names)).end(); ++_iter1275) + std::vector ::const_iterator _iter1295; + for (_iter1295 = (*(this->group_names)).begin(); _iter1295 != (*(this->group_names)).end(); ++_iter1295) { - xfer += oprot->writeString((*_iter1275)); + xfer += oprot->writeString((*_iter1295)); } xfer += oprot->writeListEnd(); } @@ -15503,14 +15503,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1276; - ::apache::thrift::protocol::TType _etype1279; - xfer += iprot->readListBegin(_etype1279, _size1276); - this->success.resize(_size1276); - uint32_t _i1280; - for (_i1280 = 0; _i1280 < _size1276; ++_i1280) + uint32_t _size1296; + ::apache::thrift::protocol::TType _etype1299; + xfer += iprot->readListBegin(_etype1299, _size1296); + this->success.resize(_size1296); + uint32_t _i1300; + for (_i1300 = 0; _i1300 < _size1296; ++_i1300) { - xfer += this->success[_i1280].read(iprot); + xfer += this->success[_i1300].read(iprot); } xfer += iprot->readListEnd(); } @@ -15557,10 +15557,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector 
::const_iterator _iter1281; - for (_iter1281 = this->success.begin(); _iter1281 != this->success.end(); ++_iter1281) + std::vector ::const_iterator _iter1301; + for (_iter1301 = this->success.begin(); _iter1301 != this->success.end(); ++_iter1301) { - xfer += (*_iter1281).write(oprot); + xfer += (*_iter1301).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15609,14 +15609,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1282; - ::apache::thrift::protocol::TType _etype1285; - xfer += iprot->readListBegin(_etype1285, _size1282); - (*(this->success)).resize(_size1282); - uint32_t _i1286; - for (_i1286 = 0; _i1286 < _size1282; ++_i1286) + uint32_t _size1302; + ::apache::thrift::protocol::TType _etype1305; + xfer += iprot->readListBegin(_etype1305, _size1302); + (*(this->success)).resize(_size1302); + uint32_t _i1306; + for (_i1306 = 0; _i1306 < _size1302; ++_i1306) { - xfer += (*(this->success))[_i1286].read(iprot); + xfer += (*(this->success))[_i1306].read(iprot); } xfer += iprot->readListEnd(); } @@ -15715,14 +15715,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1287; - ::apache::thrift::protocol::TType _etype1290; - xfer += iprot->readListBegin(_etype1290, _size1287); - this->group_names.resize(_size1287); - uint32_t _i1291; - for (_i1291 = 0; _i1291 < _size1287; ++_i1291) + uint32_t _size1307; + ::apache::thrift::protocol::TType _etype1310; + xfer += iprot->readListBegin(_etype1310, _size1307); + this->group_names.resize(_size1307); + uint32_t _i1311; + for (_i1311 = 0; _i1311 < _size1307; ++_i1311) { - xfer += iprot->readString(this->group_names[_i1291]); + xfer += iprot->readString(this->group_names[_i1311]); } xfer += iprot->readListEnd(); } @@ -15767,10 +15767,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1292; - for (_iter1292 = this->group_names.begin(); _iter1292 != this->group_names.end(); ++_iter1292) + std::vector ::const_iterator _iter1312; + for (_iter1312 = this->group_names.begin(); _iter1312 != this->group_names.end(); ++_iter1312) { - xfer += oprot->writeString((*_iter1292)); + xfer += oprot->writeString((*_iter1312)); } xfer += oprot->writeListEnd(); } @@ -15810,10 +15810,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1293; - for (_iter1293 = (*(this->group_names)).begin(); _iter1293 != (*(this->group_names)).end(); ++_iter1293) + std::vector ::const_iterator _iter1313; + for (_iter1313 = (*(this->group_names)).begin(); _iter1313 != (*(this->group_names)).end(); ++_iter1313) { - xfer += oprot->writeString((*_iter1293)); + xfer += oprot->writeString((*_iter1313)); } xfer += oprot->writeListEnd(); } @@ -15854,14 +15854,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { 
this->success.clear(); - uint32_t _size1294; - ::apache::thrift::protocol::TType _etype1297; - xfer += iprot->readListBegin(_etype1297, _size1294); - this->success.resize(_size1294); - uint32_t _i1298; - for (_i1298 = 0; _i1298 < _size1294; ++_i1298) + uint32_t _size1314; + ::apache::thrift::protocol::TType _etype1317; + xfer += iprot->readListBegin(_etype1317, _size1314); + this->success.resize(_size1314); + uint32_t _i1318; + for (_i1318 = 0; _i1318 < _size1314; ++_i1318) { - xfer += this->success[_i1298].read(iprot); + xfer += this->success[_i1318].read(iprot); } xfer += iprot->readListEnd(); } @@ -15908,10 +15908,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1299; - for (_iter1299 = this->success.begin(); _iter1299 != this->success.end(); ++_iter1299) + std::vector ::const_iterator _iter1319; + for (_iter1319 = this->success.begin(); _iter1319 != this->success.end(); ++_iter1319) { - xfer += (*_iter1299).write(oprot); + xfer += (*_iter1319).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15960,14 +15960,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1300; - ::apache::thrift::protocol::TType _etype1303; - xfer += iprot->readListBegin(_etype1303, _size1300); - (*(this->success)).resize(_size1300); - uint32_t _i1304; - for (_i1304 = 0; _i1304 < _size1300; ++_i1304) + uint32_t _size1320; + ::apache::thrift::protocol::TType _etype1323; + xfer += iprot->readListBegin(_etype1323, _size1320); + (*(this->success)).resize(_size1320); + uint32_t _i1324; + for (_i1324 = 0; _i1324 < _size1320; ++_i1324) { - xfer += (*(this->success))[_i1304].read(iprot); + xfer += (*(this->success))[_i1324].read(iprot); } xfer += iprot->readListEnd(); } @@ -16145,14 +16145,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1305; - ::apache::thrift::protocol::TType _etype1308; - xfer += iprot->readListBegin(_etype1308, _size1305); - this->success.resize(_size1305); - uint32_t _i1309; - for (_i1309 = 0; _i1309 < _size1305; ++_i1309) + uint32_t _size1325; + ::apache::thrift::protocol::TType _etype1328; + xfer += iprot->readListBegin(_etype1328, _size1325); + this->success.resize(_size1325); + uint32_t _i1329; + for (_i1329 = 0; _i1329 < _size1325; ++_i1329) { - xfer += this->success[_i1309].read(iprot); + xfer += this->success[_i1329].read(iprot); } xfer += iprot->readListEnd(); } @@ -16199,10 +16199,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1310; - for (_iter1310 = this->success.begin(); _iter1310 != this->success.end(); ++_iter1310) + std::vector ::const_iterator _iter1330; + for (_iter1330 = this->success.begin(); _iter1330 != this->success.end(); ++_iter1330) { - xfer += (*_iter1310).write(oprot); + xfer += (*_iter1330).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16251,14 +16251,14 @@ uint32_t 
ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1311; - ::apache::thrift::protocol::TType _etype1314; - xfer += iprot->readListBegin(_etype1314, _size1311); - (*(this->success)).resize(_size1311); - uint32_t _i1315; - for (_i1315 = 0; _i1315 < _size1311; ++_i1315) + uint32_t _size1331; + ::apache::thrift::protocol::TType _etype1334; + xfer += iprot->readListBegin(_etype1334, _size1331); + (*(this->success)).resize(_size1331); + uint32_t _i1335; + for (_i1335 = 0; _i1335 < _size1331; ++_i1335) { - xfer += (*(this->success))[_i1315].read(iprot); + xfer += (*(this->success))[_i1335].read(iprot); } xfer += iprot->readListEnd(); } @@ -16436,14 +16436,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1316; - ::apache::thrift::protocol::TType _etype1319; - xfer += iprot->readListBegin(_etype1319, _size1316); - this->success.resize(_size1316); - uint32_t _i1320; - for (_i1320 = 0; _i1320 < _size1316; ++_i1320) + uint32_t _size1336; + ::apache::thrift::protocol::TType _etype1339; + xfer += iprot->readListBegin(_etype1339, _size1336); + this->success.resize(_size1336); + uint32_t _i1340; + for (_i1340 = 0; _i1340 < _size1336; ++_i1340) { - xfer += iprot->readString(this->success[_i1320]); + xfer += iprot->readString(this->success[_i1340]); } xfer += iprot->readListEnd(); } @@ -16490,10 +16490,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1321; - for (_iter1321 = this->success.begin(); _iter1321 != this->success.end(); ++_iter1321) + std::vector ::const_iterator _iter1341; + for (_iter1341 = this->success.begin(); _iter1341 != this->success.end(); ++_iter1341) { - xfer += oprot->writeString((*_iter1321)); + xfer += oprot->writeString((*_iter1341)); } xfer += oprot->writeListEnd(); } @@ -16542,14 +16542,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1322; - ::apache::thrift::protocol::TType _etype1325; - xfer += iprot->readListBegin(_etype1325, _size1322); - (*(this->success)).resize(_size1322); - uint32_t _i1326; - for (_i1326 = 0; _i1326 < _size1322; ++_i1326) + uint32_t _size1342; + ::apache::thrift::protocol::TType _etype1345; + xfer += iprot->readListBegin(_etype1345, _size1342); + (*(this->success)).resize(_size1342); + uint32_t _i1346; + for (_i1346 = 0; _i1346 < _size1342; ++_i1346) { - xfer += iprot->readString((*(this->success))[_i1326]); + xfer += iprot->readString((*(this->success))[_i1346]); } xfer += iprot->readListEnd(); } @@ -16859,14 +16859,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1327; - ::apache::thrift::protocol::TType _etype1330; - xfer += iprot->readListBegin(_etype1330, _size1327); - this->part_vals.resize(_size1327); - uint32_t _i1331; - for (_i1331 = 0; _i1331 < _size1327; ++_i1331) + uint32_t _size1347; + ::apache::thrift::protocol::TType _etype1350; + xfer += iprot->readListBegin(_etype1350, 
_size1347); + this->part_vals.resize(_size1347); + uint32_t _i1351; + for (_i1351 = 0; _i1351 < _size1347; ++_i1351) { - xfer += iprot->readString(this->part_vals[_i1331]); + xfer += iprot->readString(this->part_vals[_i1351]); } xfer += iprot->readListEnd(); } @@ -16911,10 +16911,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1332; - for (_iter1332 = this->part_vals.begin(); _iter1332 != this->part_vals.end(); ++_iter1332) + std::vector ::const_iterator _iter1352; + for (_iter1352 = this->part_vals.begin(); _iter1352 != this->part_vals.end(); ++_iter1352) { - xfer += oprot->writeString((*_iter1332)); + xfer += oprot->writeString((*_iter1352)); } xfer += oprot->writeListEnd(); } @@ -16950,10 +16950,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1333; - for (_iter1333 = (*(this->part_vals)).begin(); _iter1333 != (*(this->part_vals)).end(); ++_iter1333) + std::vector ::const_iterator _iter1353; + for (_iter1353 = (*(this->part_vals)).begin(); _iter1353 != (*(this->part_vals)).end(); ++_iter1353) { - xfer += oprot->writeString((*_iter1333)); + xfer += oprot->writeString((*_iter1353)); } xfer += oprot->writeListEnd(); } @@ -16998,14 +16998,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1334; - ::apache::thrift::protocol::TType _etype1337; - xfer += iprot->readListBegin(_etype1337, _size1334); - this->success.resize(_size1334); - uint32_t _i1338; - for (_i1338 = 0; _i1338 < _size1334; ++_i1338) + uint32_t _size1354; + ::apache::thrift::protocol::TType _etype1357; + xfer += iprot->readListBegin(_etype1357, _size1354); + this->success.resize(_size1354); + uint32_t _i1358; + for (_i1358 = 0; _i1358 < _size1354; ++_i1358) { - xfer += this->success[_i1338].read(iprot); + xfer += this->success[_i1358].read(iprot); } xfer += iprot->readListEnd(); } @@ -17052,10 +17052,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1339; - for (_iter1339 = this->success.begin(); _iter1339 != this->success.end(); ++_iter1339) + std::vector ::const_iterator _iter1359; + for (_iter1359 = this->success.begin(); _iter1359 != this->success.end(); ++_iter1359) { - xfer += (*_iter1339).write(oprot); + xfer += (*_iter1359).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17104,14 +17104,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1340; - ::apache::thrift::protocol::TType _etype1343; - xfer += iprot->readListBegin(_etype1343, _size1340); - (*(this->success)).resize(_size1340); - uint32_t _i1344; - for (_i1344 = 0; _i1344 < _size1340; ++_i1344) + uint32_t _size1360; + 
::apache::thrift::protocol::TType _etype1363; + xfer += iprot->readListBegin(_etype1363, _size1360); + (*(this->success)).resize(_size1360); + uint32_t _i1364; + for (_i1364 = 0; _i1364 < _size1360; ++_i1364) { - xfer += (*(this->success))[_i1344].read(iprot); + xfer += (*(this->success))[_i1364].read(iprot); } xfer += iprot->readListEnd(); } @@ -17194,14 +17194,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1345; - ::apache::thrift::protocol::TType _etype1348; - xfer += iprot->readListBegin(_etype1348, _size1345); - this->part_vals.resize(_size1345); - uint32_t _i1349; - for (_i1349 = 0; _i1349 < _size1345; ++_i1349) + uint32_t _size1365; + ::apache::thrift::protocol::TType _etype1368; + xfer += iprot->readListBegin(_etype1368, _size1365); + this->part_vals.resize(_size1365); + uint32_t _i1369; + for (_i1369 = 0; _i1369 < _size1365; ++_i1369) { - xfer += iprot->readString(this->part_vals[_i1349]); + xfer += iprot->readString(this->part_vals[_i1369]); } xfer += iprot->readListEnd(); } @@ -17230,385 +17230,18 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1350; - ::apache::thrift::protocol::TType _etype1353; - xfer += iprot->readListBegin(_etype1353, _size1350); - this->group_names.resize(_size1350); - uint32_t _i1354; - for (_i1354 = 0; _i1354 < _size1350; ++_i1354) - { - xfer += iprot->readString(this->group_names[_i1354]); - } - xfer += iprot->readListEnd(); - } - this->__isset.group_names = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_with_auth_args"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->db_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->tbl_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1355; - for (_iter1355 = this->part_vals.begin(); _iter1355 != this->part_vals.end(); ++_iter1355) - { - xfer += oprot->writeString((*_iter1355)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); - xfer += oprot->writeI16(this->max_parts); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 5); - xfer += oprot->writeString(this->user_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector 
::const_iterator _iter1356; - for (_iter1356 = this->group_names.begin(); _iter1356 != this->group_names.end(); ++_iter1356) - { - xfer += oprot->writeString((*_iter1356)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::~ThriftHiveMetastore_get_partitions_ps_with_auth_pargs() throw() { -} - - -uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_with_auth_pargs"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->db_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->tbl_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1357; - for (_iter1357 = (*(this->part_vals)).begin(); _iter1357 != (*(this->part_vals)).end(); ++_iter1357) - { - xfer += oprot->writeString((*_iter1357)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); - xfer += oprot->writeI16((*(this->max_parts))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 5); - xfer += oprot->writeString((*(this->user_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1358; - for (_iter1358 = (*(this->group_names)).begin(); _iter1358 != (*(this->group_names)).end(); ++_iter1358) - { - xfer += oprot->writeString((*_iter1358)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_get_partitions_ps_with_auth_result::~ThriftHiveMetastore_get_partitions_ps_with_auth_result() throw() { -} - - -uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1359; - ::apache::thrift::protocol::TType _etype1362; - xfer += iprot->readListBegin(_etype1362, _size1359); - this->success.resize(_size1359); - uint32_t _i1363; - for (_i1363 = 0; _i1363 < _size1359; ++_i1363) - { - xfer += 
this->success[_i1363].read(iprot); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_with_auth_result"); - - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1364; - for (_iter1364 = this->success.begin(); _iter1364 != this->success.end(); ++_iter1364) - { - xfer += (*_iter1364).write(oprot); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->o2.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_get_partitions_ps_with_auth_presult::~ThriftHiveMetastore_get_partitions_ps_with_auth_presult() throw() { -} - - -uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1365; - ::apache::thrift::protocol::TType _etype1368; - xfer += iprot->readListBegin(_etype1368, _size1365); - (*(this->success)).resize(_size1365); - uint32_t _i1369; - for (_i1369 = 0; _i1369 < _size1365; ++_i1369) - { - xfer += (*(this->success))[_i1369].read(iprot); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - - 
-ThriftHiveMetastore_get_partition_names_ps_args::~ThriftHiveMetastore_get_partition_names_ps_args() throw() { -} - - -uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->db_name); - this->__isset.db_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->tbl_name); - this->__isset.tbl_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->part_vals.clear(); uint32_t _size1370; ::apache::thrift::protocol::TType _etype1373; xfer += iprot->readListBegin(_etype1373, _size1370); - this->part_vals.resize(_size1370); + this->group_names.resize(_size1370); uint32_t _i1374; for (_i1374 = 0; _i1374 < _size1370; ++_i1374) { - xfer += iprot->readString(this->part_vals[_i1374]); + xfer += iprot->readString(this->group_names[_i1374]); } xfer += iprot->readListEnd(); } - this->__isset.part_vals = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_I16) { - xfer += iprot->readI16(this->max_parts); - this->__isset.max_parts = true; + this->__isset.group_names = true; } else { xfer += iprot->skip(ftype); } @@ -17625,10 +17258,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: return xfer; } -uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_with_auth_args"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); @@ -17654,20 +17287,36 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += oprot->writeI16(this->max_parts); xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->user_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); + std::vector ::const_iterator _iter1376; + for (_iter1376 = this->group_names.begin(); _iter1376 != this->group_names.end(); ++_iter1376) + { + xfer += oprot->writeString((*_iter1376)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } 
-ThriftHiveMetastore_get_partition_names_ps_pargs::~ThriftHiveMetastore_get_partition_names_ps_pargs() throw() { +ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::~ThriftHiveMetastore_get_partitions_ps_with_auth_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_with_auth_pargs"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); @@ -17680,10 +17329,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1376; - for (_iter1376 = (*(this->part_vals)).begin(); _iter1376 != (*(this->part_vals)).end(); ++_iter1376) + std::vector ::const_iterator _iter1377; + for (_iter1377 = (*(this->part_vals)).begin(); _iter1377 != (*(this->part_vals)).end(); ++_iter1377) { - xfer += oprot->writeString((*_iter1376)); + xfer += oprot->writeString((*_iter1377)); } xfer += oprot->writeListEnd(); } @@ -17693,17 +17342,33 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeI16((*(this->max_parts))); xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString((*(this->user_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); + std::vector ::const_iterator _iter1378; + for (_iter1378 = (*(this->group_names)).begin(); _iter1378 != (*(this->group_names)).end(); ++_iter1378) + { + xfer += oprot->writeString((*_iter1378)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -ThriftHiveMetastore_get_partition_names_ps_result::~ThriftHiveMetastore_get_partition_names_ps_result() throw() { +ThriftHiveMetastore_get_partitions_ps_with_auth_result::~ThriftHiveMetastore_get_partitions_ps_with_auth_result() throw() { } -uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -17728,14 +17393,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1377; - ::apache::thrift::protocol::TType _etype1380; - xfer += iprot->readListBegin(_etype1380, _size1377); - this->success.resize(_size1377); - uint32_t _i1381; - for (_i1381 = 0; _i1381 < _size1377; ++_i1381) + uint32_t _size1379; + ::apache::thrift::protocol::TType 
_etype1382; + xfer += iprot->readListBegin(_etype1382, _size1379); + this->success.resize(_size1379); + uint32_t _i1383; + for (_i1383 = 0; _i1383 < _size1379; ++_i1383) { - xfer += iprot->readString(this->success[_i1381]); + xfer += this->success[_i1383].read(iprot); } xfer += iprot->readListEnd(); } @@ -17772,20 +17437,20 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif return xfer; } -uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_with_auth_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1382; - for (_iter1382 = this->success.begin(); _iter1382 != this->success.end(); ++_iter1382) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); + std::vector ::const_iterator _iter1384; + for (_iter1384 = this->success.begin(); _iter1384 != this->success.end(); ++_iter1384) { - xfer += oprot->writeString((*_iter1382)); + xfer += (*_iter1384).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17805,11 +17470,11 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri } -ThriftHiveMetastore_get_partition_names_ps_presult::~ThriftHiveMetastore_get_partition_names_ps_presult() throw() { +ThriftHiveMetastore_get_partitions_ps_with_auth_presult::~ThriftHiveMetastore_get_partitions_ps_with_auth_presult() throw() { } -uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -17834,14 +17499,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1383; - ::apache::thrift::protocol::TType _etype1386; - xfer += iprot->readListBegin(_etype1386, _size1383); - (*(this->success)).resize(_size1383); - uint32_t _i1387; - for (_i1387 = 0; _i1387 < _size1383; ++_i1387) + uint32_t _size1385; + ::apache::thrift::protocol::TType _etype1388; + xfer += iprot->readListBegin(_etype1388, _size1385); + (*(this->success)).resize(_size1385); + uint32_t _i1389; + for (_i1389 = 0; _i1389 < _size1385; ++_i1389) { - xfer += iprot->readString((*(this->success))[_i1387]); + xfer += (*(this->success))[_i1389].read(iprot); } xfer += iprot->readListEnd(); } @@ -17879,11 +17544,11 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri } -ThriftHiveMetastore_get_partitions_by_filter_args::~ThriftHiveMetastore_get_partitions_by_filter_args() throw() { +ThriftHiveMetastore_get_partition_names_ps_args::~ThriftHiveMetastore_get_partition_names_ps_args() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t 
ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -17921,9 +17586,21 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::read(::apache::thrif } break; case 3: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->filter); - this->__isset.filter = true; + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->part_vals.clear(); + uint32_t _size1390; + ::apache::thrift::protocol::TType _etype1393; + xfer += iprot->readListBegin(_etype1393, _size1390); + this->part_vals.resize(_size1390); + uint32_t _i1394; + for (_i1394 = 0; _i1394 < _size1390; ++_i1394) + { + xfer += iprot->readString(this->part_vals[_i1394]); + } + xfer += iprot->readListEnd(); + } + this->__isset.part_vals = true; } else { xfer += iprot->skip(ftype); } @@ -17948,10 +17625,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::read(::apache::thrif return xfer; } -uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_args"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); @@ -17961,8 +17638,16 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(::apache::thri xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString(this->filter); + xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); + std::vector ::const_iterator _iter1395; + for (_iter1395 = this->part_vals.begin(); _iter1395 != this->part_vals.end(); ++_iter1395) + { + xfer += oprot->writeString((*_iter1395)); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); @@ -17975,14 +17660,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(::apache::thri } -ThriftHiveMetastore_get_partitions_by_filter_pargs::~ThriftHiveMetastore_get_partitions_by_filter_pargs() throw() { +ThriftHiveMetastore_get_partition_names_ps_pargs::~ThriftHiveMetastore_get_partition_names_ps_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_pargs"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); @@ -17992,8 +17677,16 @@ uint32_t 
ThriftHiveMetastore_get_partitions_by_filter_pargs::write(::apache::thr xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString((*(this->filter))); + xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); + std::vector ::const_iterator _iter1396; + for (_iter1396 = (*(this->part_vals)).begin(); _iter1396 != (*(this->part_vals)).end(); ++_iter1396) + { + xfer += oprot->writeString((*_iter1396)); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); @@ -18006,11 +17699,11 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_pargs::write(::apache::thr } -ThriftHiveMetastore_get_partitions_by_filter_result::~ThriftHiveMetastore_get_partitions_by_filter_result() throw() { +ThriftHiveMetastore_get_partition_names_ps_result::~ThriftHiveMetastore_get_partition_names_ps_result() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -18035,14 +17728,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1388; - ::apache::thrift::protocol::TType _etype1391; - xfer += iprot->readListBegin(_etype1391, _size1388); - this->success.resize(_size1388); - uint32_t _i1392; - for (_i1392 = 0; _i1392 < _size1388; ++_i1392) + uint32_t _size1397; + ::apache::thrift::protocol::TType _etype1400; + xfer += iprot->readListBegin(_etype1400, _size1397); + this->success.resize(_size1397); + uint32_t _i1401; + for (_i1401 = 0; _i1401 < _size1397; ++_i1401) { - xfer += this->success[_i1392].read(iprot); + xfer += iprot->readString(this->success[_i1401]); } xfer += iprot->readListEnd(); } @@ -18079,20 +17772,20 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr return xfer; } -uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1393; - for (_iter1393 = this->success.begin(); _iter1393 != this->success.end(); ++_iter1393) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); + std::vector ::const_iterator _iter1402; + for (_iter1402 = this->success.begin(); _iter1402 != this->success.end(); ++_iter1402) { - xfer += (*_iter1393).write(oprot); + xfer += oprot->writeString((*_iter1402)); } xfer += oprot->writeListEnd(); } @@ -18112,11 
+17805,11 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th } -ThriftHiveMetastore_get_partitions_by_filter_presult::~ThriftHiveMetastore_get_partitions_by_filter_presult() throw() { +ThriftHiveMetastore_get_partition_names_ps_presult::~ThriftHiveMetastore_get_partition_names_ps_presult() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -18141,14 +17834,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1394; - ::apache::thrift::protocol::TType _etype1397; - xfer += iprot->readListBegin(_etype1397, _size1394); - (*(this->success)).resize(_size1394); - uint32_t _i1398; - for (_i1398 = 0; _i1398 < _size1394; ++_i1398) + uint32_t _size1403; + ::apache::thrift::protocol::TType _etype1406; + xfer += iprot->readListBegin(_etype1406, _size1403); + (*(this->success)).resize(_size1403); + uint32_t _i1407; + for (_i1407 = 0; _i1407 < _size1403; ++_i1407) { - xfer += (*(this->success))[_i1398].read(iprot); + xfer += iprot->readString((*(this->success))[_i1407]); } xfer += iprot->readListEnd(); } @@ -18186,11 +17879,11 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th } -ThriftHiveMetastore_get_part_specs_by_filter_args::~ThriftHiveMetastore_get_part_specs_by_filter_args() throw() { +ThriftHiveMetastore_get_partitions_by_filter_args::~ThriftHiveMetastore_get_partitions_by_filter_args() throw() { } -uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -18236,8 +17929,8 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::read(::apache::thrif } break; case 4: - if (ftype == ::apache::thrift::protocol::T_I32) { - xfer += iprot->readI32(this->max_parts); + if (ftype == ::apache::thrift::protocol::T_I16) { + xfer += iprot->readI16(this->max_parts); this->__isset.max_parts = true; } else { xfer += iprot->skip(ftype); @@ -18255,10 +17948,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::read(::apache::thrif return xfer; } -uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_args"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); @@ -18272,8 +17965,8 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::write(::apache::thri xfer += oprot->writeString(this->filter); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 4); - xfer += oprot->writeI32(this->max_parts); + xfer 
+= oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); + xfer += oprot->writeI16(this->max_parts); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -18282,14 +17975,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::write(::apache::thri } -ThriftHiveMetastore_get_part_specs_by_filter_pargs::~ThriftHiveMetastore_get_part_specs_by_filter_pargs() throw() { +ThriftHiveMetastore_get_partitions_by_filter_pargs::~ThriftHiveMetastore_get_partitions_by_filter_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_part_specs_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_pargs"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); @@ -18303,8 +17996,8 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_pargs::write(::apache::thr xfer += oprot->writeString((*(this->filter))); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 4); - xfer += oprot->writeI32((*(this->max_parts))); + xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); + xfer += oprot->writeI16((*(this->max_parts))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -18313,11 +18006,11 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_pargs::write(::apache::thr } -ThriftHiveMetastore_get_part_specs_by_filter_result::~ThriftHiveMetastore_get_part_specs_by_filter_result() throw() { +ThriftHiveMetastore_get_partitions_by_filter_result::~ThriftHiveMetastore_get_partitions_by_filter_result() throw() { } -uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -18342,14 +18035,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1399; - ::apache::thrift::protocol::TType _etype1402; - xfer += iprot->readListBegin(_etype1402, _size1399); - this->success.resize(_size1399); - uint32_t _i1403; - for (_i1403 = 0; _i1403 < _size1399; ++_i1403) + uint32_t _size1408; + ::apache::thrift::protocol::TType _etype1411; + xfer += iprot->readListBegin(_etype1411, _size1408); + this->success.resize(_size1408); + uint32_t _i1412; + for (_i1412 = 0; _i1412 < _size1408; ++_i1412) { - xfer += this->success[_i1403].read(iprot); + xfer += this->success[_i1412].read(iprot); } xfer += iprot->readListEnd(); } @@ -18386,20 +18079,20 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr return xfer; } -uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += 
oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1404; - for (_iter1404 = this->success.begin(); _iter1404 != this->success.end(); ++_iter1404) + std::vector ::const_iterator _iter1413; + for (_iter1413 = this->success.begin(); _iter1413 != this->success.end(); ++_iter1413) { - xfer += (*_iter1404).write(oprot); + xfer += (*_iter1413).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18419,11 +18112,11 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th } -ThriftHiveMetastore_get_part_specs_by_filter_presult::~ThriftHiveMetastore_get_part_specs_by_filter_presult() throw() { +ThriftHiveMetastore_get_partitions_by_filter_presult::~ThriftHiveMetastore_get_partitions_by_filter_presult() throw() { } -uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -18448,14 +18141,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1405; - ::apache::thrift::protocol::TType _etype1408; - xfer += iprot->readListBegin(_etype1408, _size1405); - (*(this->success)).resize(_size1405); - uint32_t _i1409; - for (_i1409 = 0; _i1409 < _size1405; ++_i1409) + uint32_t _size1414; + ::apache::thrift::protocol::TType _etype1417; + xfer += iprot->readListBegin(_etype1417, _size1414); + (*(this->success)).resize(_size1414); + uint32_t _i1418; + for (_i1418 = 0; _i1418 < _size1414; ++_i1418) { - xfer += (*(this->success))[_i1409].read(iprot); + xfer += (*(this->success))[_i1418].read(iprot); } xfer += iprot->readListEnd(); } @@ -18493,11 +18186,11 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th } -ThriftHiveMetastore_get_partitions_by_expr_args::~ThriftHiveMetastore_get_partitions_by_expr_args() throw() { +ThriftHiveMetastore_get_part_specs_by_filter_args::~ThriftHiveMetastore_get_part_specs_by_filter_args() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -18519,9 +18212,33 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::read(::apache::thrift: switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->req.read(iprot); - this->__isset.req = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tbl_name); + this->__isset.tbl_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == 
::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->filter); + this->__isset.filter = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->max_parts); + this->__isset.max_parts = true; } else { xfer += iprot->skip(ftype); } @@ -18538,13 +18255,25 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::read(::apache::thrift: return xfer; } -uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_args"); - xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->req.write(oprot); + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tbl_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->filter); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 4); + xfer += oprot->writeI32(this->max_parts); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -18553,17 +18282,29 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::write(::apache::thrift } -ThriftHiveMetastore_get_partitions_by_expr_pargs::~ThriftHiveMetastore_get_partitions_by_expr_pargs() throw() { +ThriftHiveMetastore_get_part_specs_by_filter_pargs::~ThriftHiveMetastore_get_part_specs_by_filter_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_expr_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_pargs"); - xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->tbl_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString((*(this->filter))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 4); + xfer += oprot->writeI32((*(this->max_parts))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -18572,11 +18313,11 @@ uint32_t 
ThriftHiveMetastore_get_partitions_by_expr_pargs::write(::apache::thrif } -ThriftHiveMetastore_get_partitions_by_expr_result::~ThriftHiveMetastore_get_partitions_by_expr_result() throw() { +ThriftHiveMetastore_get_part_specs_by_filter_result::~ThriftHiveMetastore_get_part_specs_by_filter_result() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_expr_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -18598,8 +18339,20 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_result::read(::apache::thrif switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1419; + ::apache::thrift::protocol::TType _etype1422; + xfer += iprot->readListBegin(_etype1422, _size1419); + this->success.resize(_size1419); + uint32_t _i1423; + for (_i1423 = 0; _i1423 < _size1419; ++_i1423) + { + xfer += this->success[_i1423].read(iprot); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -18633,15 +18386,23 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_result::read(::apache::thrif return xfer; } -uint32_t ThriftHiveMetastore_get_partitions_by_expr_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); + std::vector ::const_iterator _iter1424; + for (_iter1424 = this->success.begin(); _iter1424 != this->success.end(); ++_iter1424) + { + xfer += (*_iter1424).write(oprot); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -18658,11 +18419,11 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_result::write(::apache::thri } -ThriftHiveMetastore_get_partitions_by_expr_presult::~ThriftHiveMetastore_get_partitions_by_expr_presult() throw() { +ThriftHiveMetastore_get_part_specs_by_filter_presult::~ThriftHiveMetastore_get_part_specs_by_filter_presult() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_expr_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -18684,8 +18445,20 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_presult::read(::apache::thri switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + 
(*(this->success)).clear(); + uint32_t _size1425; + ::apache::thrift::protocol::TType _etype1428; + xfer += iprot->readListBegin(_etype1428, _size1425); + (*(this->success)).resize(_size1425); + uint32_t _i1429; + for (_i1429 = 0; _i1429 < _size1425; ++_i1429) + { + xfer += (*(this->success))[_i1429].read(iprot); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -18720,11 +18493,11 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_presult::read(::apache::thri } -ThriftHiveMetastore_get_num_partitions_by_filter_args::~ThriftHiveMetastore_get_num_partitions_by_filter_args() throw() { +ThriftHiveMetastore_get_partitions_by_expr_args::~ThriftHiveMetastore_get_partitions_by_expr_args() throw() { } -uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -18746,25 +18519,9 @@ uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_args::read(::apache::t switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->db_name); - this->__isset.db_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->tbl_name); - this->__isset.tbl_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->filter); - this->__isset.filter = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->req.read(iprot); + this->__isset.req = true; } else { xfer += iprot->skip(ftype); } @@ -18781,21 +18538,13 @@ uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_args::read(::apache::t return xfer; } -uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_num_partitions_by_filter_args"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->db_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->tbl_name); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_args"); - xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString(this->filter); + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -18804,25 +18553,17 @@ uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_args::write(::apache:: } -ThriftHiveMetastore_get_num_partitions_by_filter_pargs::~ThriftHiveMetastore_get_num_partitions_by_filter_pargs() throw() { +ThriftHiveMetastore_get_partitions_by_expr_pargs::~ThriftHiveMetastore_get_partitions_by_expr_pargs() throw() { } -uint32_t 
ThriftHiveMetastore_get_num_partitions_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_by_expr_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_num_partitions_by_filter_pargs"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->db_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->tbl_name))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_pargs"); - xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString((*(this->filter))); + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -18831,11 +18572,11 @@ uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_pargs::write(::apache: } -ThriftHiveMetastore_get_num_partitions_by_filter_result::~ThriftHiveMetastore_get_num_partitions_by_filter_result() throw() { +ThriftHiveMetastore_get_partitions_by_expr_result::~ThriftHiveMetastore_get_partitions_by_expr_result() throw() { } -uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_by_expr_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -18857,8 +18598,8 @@ uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_result::read(::apache: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_I32) { - xfer += iprot->readI32(this->success); + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -18892,15 +18633,15 @@ uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_result::read(::apache: return xfer; } -uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_by_expr_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_num_partitions_by_filter_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_I32, 0); - xfer += oprot->writeI32(this->success); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -18917,11 +18658,11 @@ uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_result::write(::apache } -ThriftHiveMetastore_get_num_partitions_by_filter_presult::~ThriftHiveMetastore_get_num_partitions_by_filter_presult() throw() { 
+ThriftHiveMetastore_get_partitions_by_expr_presult::~ThriftHiveMetastore_get_partitions_by_expr_presult() throw() { } -uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_by_expr_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -18943,8 +18684,8 @@ uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_presult::read(::apache switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_I32) { - xfer += iprot->readI32((*(this->success))); + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -18979,11 +18720,11 @@ uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_presult::read(::apache } -ThriftHiveMetastore_get_partitions_by_names_args::~ThriftHiveMetastore_get_partitions_by_names_args() throw() { +ThriftHiveMetastore_get_num_partitions_by_filter_args::~ThriftHiveMetastore_get_num_partitions_by_filter_args() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -19021,21 +18762,9 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift } break; case 3: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->names.clear(); - uint32_t _size1410; - ::apache::thrift::protocol::TType _etype1413; - xfer += iprot->readListBegin(_etype1413, _size1410); - this->names.resize(_size1410); - uint32_t _i1414; - for (_i1414 = 0; _i1414 < _size1410; ++_i1414) - { - xfer += iprot->readString(this->names[_i1414]); - } - xfer += iprot->readListEnd(); - } - this->__isset.names = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->filter); + this->__isset.filter = true; } else { xfer += iprot->skip(ftype); } @@ -19052,10 +18781,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift return xfer; } -uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_names_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_num_partitions_by_filter_args"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); @@ -19065,16 +18794,8 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter1415; - for (_iter1415 = this->names.begin(); _iter1415 != this->names.end(); ++_iter1415) - { - xfer += oprot->writeString((*_iter1415)); - } - xfer += 
oprot->writeListEnd(); - } + xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->filter); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -19083,14 +18804,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif } -ThriftHiveMetastore_get_partitions_by_names_pargs::~ThriftHiveMetastore_get_partitions_by_names_pargs() throw() { +ThriftHiveMetastore_get_num_partitions_by_filter_pargs::~ThriftHiveMetastore_get_num_partitions_by_filter_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_names_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_num_partitions_by_filter_pargs"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); @@ -19100,16 +18821,8 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter1416; - for (_iter1416 = (*(this->names)).begin(); _iter1416 != (*(this->names)).end(); ++_iter1416) - { - xfer += oprot->writeString((*_iter1416)); - } - xfer += oprot->writeListEnd(); - } + xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString((*(this->filter))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -19118,11 +18831,11 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri } -ThriftHiveMetastore_get_partitions_by_names_result::~ThriftHiveMetastore_get_partitions_by_names_result() throw() { +ThriftHiveMetastore_get_num_partitions_by_filter_result::~ThriftHiveMetastore_get_num_partitions_by_filter_result() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -19144,20 +18857,8 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1417; - ::apache::thrift::protocol::TType _etype1420; - xfer += iprot->readListBegin(_etype1420, _size1417); - this->success.resize(_size1417); - uint32_t _i1421; - for (_i1421 = 0; _i1421 < _size1417; ++_i1421) - { - xfer += this->success[_i1421].read(iprot); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->success); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -19191,23 +18892,15 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri return xfer; } -uint32_t 
ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_names_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_num_partitions_by_filter_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1422; - for (_iter1422 = this->success.begin(); _iter1422 != this->success.end(); ++_iter1422) - { - xfer += (*_iter1422).write(oprot); - } - xfer += oprot->writeListEnd(); - } + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_I32, 0); + xfer += oprot->writeI32(this->success); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -19224,11 +18917,11 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr } -ThriftHiveMetastore_get_partitions_by_names_presult::~ThriftHiveMetastore_get_partitions_by_names_presult() throw() { +ThriftHiveMetastore_get_num_partitions_by_filter_presult::~ThriftHiveMetastore_get_num_partitions_by_filter_presult() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_num_partitions_by_filter_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -19250,20 +18943,8 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1423; - ::apache::thrift::protocol::TType _etype1426; - xfer += iprot->readListBegin(_etype1426, _size1423); - (*(this->success)).resize(_size1423); - uint32_t _i1427; - for (_i1427 = 0; _i1427 < _size1423; ++_i1427) - { - xfer += (*(this->success))[_i1427].read(iprot); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32((*(this->success))); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -19298,11 +18979,11 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr } -ThriftHiveMetastore_alter_partition_args::~ThriftHiveMetastore_alter_partition_args() throw() { +ThriftHiveMetastore_get_partitions_by_names_args::~ThriftHiveMetastore_get_partitions_by_names_args() throw() { } -uint32_t ThriftHiveMetastore_alter_partition_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -19340,9 +19021,21 @@ uint32_t ThriftHiveMetastore_alter_partition_args::read(::apache::thrift::protoc } break; case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->new_part.read(iprot); - this->__isset.new_part = true; + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->names.clear(); + uint32_t _size1430; + ::apache::thrift::protocol::TType 
_etype1433; + xfer += iprot->readListBegin(_etype1433, _size1430); + this->names.resize(_size1430); + uint32_t _i1434; + for (_i1434 = 0; _i1434 < _size1430; ++_i1434) + { + xfer += iprot->readString(this->names[_i1434]); + } + xfer += iprot->readListEnd(); + } + this->__isset.names = true; } else { xfer += iprot->skip(ftype); } @@ -19359,10 +19052,10 @@ uint32_t ThriftHiveMetastore_alter_partition_args::read(::apache::thrift::protoc return xfer; } -uint32_t ThriftHiveMetastore_alter_partition_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partition_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_names_args"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); @@ -19372,8 +19065,16 @@ uint32_t ThriftHiveMetastore_alter_partition_args::write(::apache::thrift::proto xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->new_part.write(oprot); + xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); + std::vector ::const_iterator _iter1435; + for (_iter1435 = this->names.begin(); _iter1435 != this->names.end(); ++_iter1435) + { + xfer += oprot->writeString((*_iter1435)); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -19382,14 +19083,14 @@ uint32_t ThriftHiveMetastore_alter_partition_args::write(::apache::thrift::proto } -ThriftHiveMetastore_alter_partition_pargs::~ThriftHiveMetastore_alter_partition_pargs() throw() { +ThriftHiveMetastore_get_partitions_by_names_pargs::~ThriftHiveMetastore_get_partitions_by_names_pargs() throw() { } -uint32_t ThriftHiveMetastore_alter_partition_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partition_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_names_pargs"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); @@ -19399,8 +19100,16 @@ uint32_t ThriftHiveMetastore_alter_partition_pargs::write(::apache::thrift::prot xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += (*(this->new_part)).write(oprot); + xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); + std::vector ::const_iterator _iter1436; + for (_iter1436 = (*(this->names)).begin(); _iter1436 != (*(this->names)).end(); ++_iter1436) + { + xfer += oprot->writeString((*_iter1436)); + } + xfer += oprot->writeListEnd(); + } xfer += 
oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -19409,11 +19118,11 @@ uint32_t ThriftHiveMetastore_alter_partition_pargs::write(::apache::thrift::prot } -ThriftHiveMetastore_alter_partition_result::~ThriftHiveMetastore_alter_partition_result() throw() { +ThriftHiveMetastore_get_partitions_by_names_result::~ThriftHiveMetastore_get_partitions_by_names_result() throw() { } -uint32_t ThriftHiveMetastore_alter_partition_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -19434,6 +19143,26 @@ uint32_t ThriftHiveMetastore_alter_partition_result::read(::apache::thrift::prot } switch (fid) { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1437; + ::apache::thrift::protocol::TType _etype1440; + xfer += iprot->readListBegin(_etype1440, _size1437); + this->success.resize(_size1437); + uint32_t _i1441; + for (_i1441 = 0; _i1441 < _size1437; ++_i1441) + { + xfer += this->success[_i1441].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -19462,13 +19191,25 @@ uint32_t ThriftHiveMetastore_alter_partition_result::read(::apache::thrift::prot return xfer; } -uint32_t ThriftHiveMetastore_alter_partition_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partition_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_names_result"); - if (this->__isset.o1) { + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); + std::vector ::const_iterator _iter1442; + for (_iter1442 = this->success.begin(); _iter1442 != this->success.end(); ++_iter1442) + { + xfer += (*_iter1442).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); @@ -19483,11 +19224,270 @@ uint32_t ThriftHiveMetastore_alter_partition_result::write(::apache::thrift::pro } -ThriftHiveMetastore_alter_partition_presult::~ThriftHiveMetastore_alter_partition_presult() throw() { +ThriftHiveMetastore_get_partitions_by_names_presult::~ThriftHiveMetastore_get_partitions_by_names_presult() throw() { } -uint32_t ThriftHiveMetastore_alter_partition_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == 
::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1443; + ::apache::thrift::protocol::TType _etype1446; + xfer += iprot->readListBegin(_etype1446, _size1443); + (*(this->success)).resize(_size1443); + uint32_t _i1447; + for (_i1447 = 0; _i1447 < _size1443; ++_i1447) + { + xfer += (*(this->success))[_i1447].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_alter_partition_args::~ThriftHiveMetastore_alter_partition_args() throw() { +} + + +uint32_t ThriftHiveMetastore_alter_partition_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tbl_name); + this->__isset.tbl_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->new_part.read(iprot); + this->__isset.new_part = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_partition_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partition_args"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tbl_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->new_part.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_alter_partition_pargs::~ThriftHiveMetastore_alter_partition_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_alter_partition_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + 
uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partition_pargs"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->tbl_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += (*(this->new_part)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_alter_partition_result::~ThriftHiveMetastore_alter_partition_result() throw() { +} + + +uint32_t ThriftHiveMetastore_alter_partition_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_partition_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partition_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_alter_partition_presult::~ThriftHiveMetastore_alter_partition_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_alter_partition_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -19582,14 +19582,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1428; - ::apache::thrift::protocol::TType _etype1431; - xfer += iprot->readListBegin(_etype1431, _size1428); - this->new_parts.resize(_size1428); - uint32_t _i1432; - for (_i1432 = 0; _i1432 < _size1428; ++_i1432) + uint32_t _size1448; + ::apache::thrift::protocol::TType _etype1451; + xfer += iprot->readListBegin(_etype1451, _size1448); + this->new_parts.resize(_size1448); + uint32_t _i1452; + for (_i1452 = 0; _i1452 < _size1448; ++_i1452) { - 
xfer += this->new_parts[_i1432].read(iprot); + xfer += this->new_parts[_i1452].read(iprot); } xfer += iprot->readListEnd(); } @@ -19626,10 +19626,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1433; - for (_iter1433 = this->new_parts.begin(); _iter1433 != this->new_parts.end(); ++_iter1433) + std::vector ::const_iterator _iter1453; + for (_iter1453 = this->new_parts.begin(); _iter1453 != this->new_parts.end(); ++_iter1453) { - xfer += (*_iter1433).write(oprot); + xfer += (*_iter1453).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19661,10 +19661,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1434; - for (_iter1434 = (*(this->new_parts)).begin(); _iter1434 != (*(this->new_parts)).end(); ++_iter1434) + std::vector ::const_iterator _iter1454; + for (_iter1454 = (*(this->new_parts)).begin(); _iter1454 != (*(this->new_parts)).end(); ++_iter1454) { - xfer += (*_iter1434).write(oprot); + xfer += (*_iter1454).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19849,14 +19849,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1435; - ::apache::thrift::protocol::TType _etype1438; - xfer += iprot->readListBegin(_etype1438, _size1435); - this->new_parts.resize(_size1435); - uint32_t _i1439; - for (_i1439 = 0; _i1439 < _size1435; ++_i1439) + uint32_t _size1455; + ::apache::thrift::protocol::TType _etype1458; + xfer += iprot->readListBegin(_etype1458, _size1455); + this->new_parts.resize(_size1455); + uint32_t _i1459; + for (_i1459 = 0; _i1459 < _size1455; ++_i1459) { - xfer += this->new_parts[_i1439].read(iprot); + xfer += this->new_parts[_i1459].read(iprot); } xfer += iprot->readListEnd(); } @@ -19901,10 +19901,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::wri xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1440; - for (_iter1440 = this->new_parts.begin(); _iter1440 != this->new_parts.end(); ++_iter1440) + std::vector ::const_iterator _iter1460; + for (_iter1460 = this->new_parts.begin(); _iter1460 != this->new_parts.end(); ++_iter1460) { - xfer += (*_iter1440).write(oprot); + xfer += (*_iter1460).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19940,10 +19940,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1441; - for (_iter1441 = (*(this->new_parts)).begin(); _iter1441 != (*(this->new_parts)).end(); ++_iter1441) + std::vector ::const_iterator _iter1461; + for (_iter1461 = (*(this->new_parts)).begin(); _iter1461 != (*(this->new_parts)).end(); 
++_iter1461) { - xfer += (*_iter1441).write(oprot); + xfer += (*_iter1461).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20387,14 +20387,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1442; - ::apache::thrift::protocol::TType _etype1445; - xfer += iprot->readListBegin(_etype1445, _size1442); - this->part_vals.resize(_size1442); - uint32_t _i1446; - for (_i1446 = 0; _i1446 < _size1442; ++_i1446) + uint32_t _size1462; + ::apache::thrift::protocol::TType _etype1465; + xfer += iprot->readListBegin(_etype1465, _size1462); + this->part_vals.resize(_size1462); + uint32_t _i1466; + for (_i1466 = 0; _i1466 < _size1462; ++_i1466) { - xfer += iprot->readString(this->part_vals[_i1446]); + xfer += iprot->readString(this->part_vals[_i1466]); } xfer += iprot->readListEnd(); } @@ -20439,10 +20439,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1447; - for (_iter1447 = this->part_vals.begin(); _iter1447 != this->part_vals.end(); ++_iter1447) + std::vector ::const_iterator _iter1467; + for (_iter1467 = this->part_vals.begin(); _iter1467 != this->part_vals.end(); ++_iter1467) { - xfer += oprot->writeString((*_iter1447)); + xfer += oprot->writeString((*_iter1467)); } xfer += oprot->writeListEnd(); } @@ -20478,10 +20478,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1448; - for (_iter1448 = (*(this->part_vals)).begin(); _iter1448 != (*(this->part_vals)).end(); ++_iter1448) + std::vector ::const_iterator _iter1468; + for (_iter1468 = (*(this->part_vals)).begin(); _iter1468 != (*(this->part_vals)).end(); ++_iter1468) { - xfer += oprot->writeString((*_iter1448)); + xfer += oprot->writeString((*_iter1468)); } xfer += oprot->writeListEnd(); } @@ -20654,14 +20654,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1449; - ::apache::thrift::protocol::TType _etype1452; - xfer += iprot->readListBegin(_etype1452, _size1449); - this->part_vals.resize(_size1449); - uint32_t _i1453; - for (_i1453 = 0; _i1453 < _size1449; ++_i1453) + uint32_t _size1469; + ::apache::thrift::protocol::TType _etype1472; + xfer += iprot->readListBegin(_etype1472, _size1469); + this->part_vals.resize(_size1469); + uint32_t _i1473; + for (_i1473 = 0; _i1473 < _size1469; ++_i1473) { - xfer += iprot->readString(this->part_vals[_i1453]); + xfer += iprot->readString(this->part_vals[_i1473]); } xfer += iprot->readListEnd(); } @@ -20698,10 +20698,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1454; - for (_iter1454 = this->part_vals.begin(); _iter1454 != this->part_vals.end(); ++_iter1454) + 
std::vector ::const_iterator _iter1474; + for (_iter1474 = this->part_vals.begin(); _iter1474 != this->part_vals.end(); ++_iter1474) { - xfer += oprot->writeString((*_iter1454)); + xfer += oprot->writeString((*_iter1474)); } xfer += oprot->writeListEnd(); } @@ -20729,10 +20729,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1455; - for (_iter1455 = (*(this->part_vals)).begin(); _iter1455 != (*(this->part_vals)).end(); ++_iter1455) + std::vector ::const_iterator _iter1475; + for (_iter1475 = (*(this->part_vals)).begin(); _iter1475 != (*(this->part_vals)).end(); ++_iter1475) { - xfer += oprot->writeString((*_iter1455)); + xfer += oprot->writeString((*_iter1475)); } xfer += oprot->writeListEnd(); } @@ -21207,14 +21207,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1456; - ::apache::thrift::protocol::TType _etype1459; - xfer += iprot->readListBegin(_etype1459, _size1456); - this->success.resize(_size1456); - uint32_t _i1460; - for (_i1460 = 0; _i1460 < _size1456; ++_i1460) + uint32_t _size1476; + ::apache::thrift::protocol::TType _etype1479; + xfer += iprot->readListBegin(_etype1479, _size1476); + this->success.resize(_size1476); + uint32_t _i1480; + for (_i1480 = 0; _i1480 < _size1476; ++_i1480) { - xfer += iprot->readString(this->success[_i1460]); + xfer += iprot->readString(this->success[_i1480]); } xfer += iprot->readListEnd(); } @@ -21253,10 +21253,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1461; - for (_iter1461 = this->success.begin(); _iter1461 != this->success.end(); ++_iter1461) + std::vector ::const_iterator _iter1481; + for (_iter1481 = this->success.begin(); _iter1481 != this->success.end(); ++_iter1481) { - xfer += oprot->writeString((*_iter1461)); + xfer += oprot->writeString((*_iter1481)); } xfer += oprot->writeListEnd(); } @@ -21301,14 +21301,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1462; - ::apache::thrift::protocol::TType _etype1465; - xfer += iprot->readListBegin(_etype1465, _size1462); - (*(this->success)).resize(_size1462); - uint32_t _i1466; - for (_i1466 = 0; _i1466 < _size1462; ++_i1466) + uint32_t _size1482; + ::apache::thrift::protocol::TType _etype1485; + xfer += iprot->readListBegin(_etype1485, _size1482); + (*(this->success)).resize(_size1482); + uint32_t _i1486; + for (_i1486 = 0; _i1486 < _size1482; ++_i1486) { - xfer += iprot->readString((*(this->success))[_i1466]); + xfer += iprot->readString((*(this->success))[_i1486]); } xfer += iprot->readListEnd(); } @@ -21446,17 +21446,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1467; - ::apache::thrift::protocol::TType _ktype1468; - ::apache::thrift::protocol::TType _vtype1469; - xfer += 
iprot->readMapBegin(_ktype1468, _vtype1469, _size1467); - uint32_t _i1471; - for (_i1471 = 0; _i1471 < _size1467; ++_i1471) + uint32_t _size1487; + ::apache::thrift::protocol::TType _ktype1488; + ::apache::thrift::protocol::TType _vtype1489; + xfer += iprot->readMapBegin(_ktype1488, _vtype1489, _size1487); + uint32_t _i1491; + for (_i1491 = 0; _i1491 < _size1487; ++_i1491) { - std::string _key1472; - xfer += iprot->readString(_key1472); - std::string& _val1473 = this->success[_key1472]; - xfer += iprot->readString(_val1473); + std::string _key1492; + xfer += iprot->readString(_key1492); + std::string& _val1493 = this->success[_key1492]; + xfer += iprot->readString(_val1493); } xfer += iprot->readMapEnd(); } @@ -21495,11 +21495,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1474; - for (_iter1474 = this->success.begin(); _iter1474 != this->success.end(); ++_iter1474) + std::map ::const_iterator _iter1494; + for (_iter1494 = this->success.begin(); _iter1494 != this->success.end(); ++_iter1494) { - xfer += oprot->writeString(_iter1474->first); - xfer += oprot->writeString(_iter1474->second); + xfer += oprot->writeString(_iter1494->first); + xfer += oprot->writeString(_iter1494->second); } xfer += oprot->writeMapEnd(); } @@ -21544,17 +21544,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1475; - ::apache::thrift::protocol::TType _ktype1476; - ::apache::thrift::protocol::TType _vtype1477; - xfer += iprot->readMapBegin(_ktype1476, _vtype1477, _size1475); - uint32_t _i1479; - for (_i1479 = 0; _i1479 < _size1475; ++_i1479) + uint32_t _size1495; + ::apache::thrift::protocol::TType _ktype1496; + ::apache::thrift::protocol::TType _vtype1497; + xfer += iprot->readMapBegin(_ktype1496, _vtype1497, _size1495); + uint32_t _i1499; + for (_i1499 = 0; _i1499 < _size1495; ++_i1499) { - std::string _key1480; - xfer += iprot->readString(_key1480); - std::string& _val1481 = (*(this->success))[_key1480]; - xfer += iprot->readString(_val1481); + std::string _key1500; + xfer += iprot->readString(_key1500); + std::string& _val1501 = (*(this->success))[_key1500]; + xfer += iprot->readString(_val1501); } xfer += iprot->readMapEnd(); } @@ -21629,17 +21629,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1482; - ::apache::thrift::protocol::TType _ktype1483; - ::apache::thrift::protocol::TType _vtype1484; - xfer += iprot->readMapBegin(_ktype1483, _vtype1484, _size1482); - uint32_t _i1486; - for (_i1486 = 0; _i1486 < _size1482; ++_i1486) + uint32_t _size1502; + ::apache::thrift::protocol::TType _ktype1503; + ::apache::thrift::protocol::TType _vtype1504; + xfer += iprot->readMapBegin(_ktype1503, _vtype1504, _size1502); + uint32_t _i1506; + for (_i1506 = 0; _i1506 < _size1502; ++_i1506) { - std::string _key1487; - xfer += iprot->readString(_key1487); - std::string& _val1488 = this->part_vals[_key1487]; - xfer += iprot->readString(_val1488); + std::string _key1507; + xfer += iprot->readString(_key1507); + std::string& _val1508 = this->part_vals[_key1507]; + xfer += 
iprot->readString(_val1508); } xfer += iprot->readMapEnd(); } @@ -21650,9 +21650,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1489; - xfer += iprot->readI32(ecast1489); - this->eventType = (PartitionEventType::type)ecast1489; + int32_t ecast1509; + xfer += iprot->readI32(ecast1509); + this->eventType = (PartitionEventType::type)ecast1509; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -21686,11 +21686,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1490; - for (_iter1490 = this->part_vals.begin(); _iter1490 != this->part_vals.end(); ++_iter1490) + std::map ::const_iterator _iter1510; + for (_iter1510 = this->part_vals.begin(); _iter1510 != this->part_vals.end(); ++_iter1510) { - xfer += oprot->writeString(_iter1490->first); - xfer += oprot->writeString(_iter1490->second); + xfer += oprot->writeString(_iter1510->first); + xfer += oprot->writeString(_iter1510->second); } xfer += oprot->writeMapEnd(); } @@ -21726,11 +21726,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1491; - for (_iter1491 = (*(this->part_vals)).begin(); _iter1491 != (*(this->part_vals)).end(); ++_iter1491) + std::map ::const_iterator _iter1511; + for (_iter1511 = (*(this->part_vals)).begin(); _iter1511 != (*(this->part_vals)).end(); ++_iter1511) { - xfer += oprot->writeString(_iter1491->first); - xfer += oprot->writeString(_iter1491->second); + xfer += oprot->writeString(_iter1511->first); + xfer += oprot->writeString(_iter1511->second); } xfer += oprot->writeMapEnd(); } @@ -21999,17 +21999,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1492; - ::apache::thrift::protocol::TType _ktype1493; - ::apache::thrift::protocol::TType _vtype1494; - xfer += iprot->readMapBegin(_ktype1493, _vtype1494, _size1492); - uint32_t _i1496; - for (_i1496 = 0; _i1496 < _size1492; ++_i1496) + uint32_t _size1512; + ::apache::thrift::protocol::TType _ktype1513; + ::apache::thrift::protocol::TType _vtype1514; + xfer += iprot->readMapBegin(_ktype1513, _vtype1514, _size1512); + uint32_t _i1516; + for (_i1516 = 0; _i1516 < _size1512; ++_i1516) { - std::string _key1497; - xfer += iprot->readString(_key1497); - std::string& _val1498 = this->part_vals[_key1497]; - xfer += iprot->readString(_val1498); + std::string _key1517; + xfer += iprot->readString(_key1517); + std::string& _val1518 = this->part_vals[_key1517]; + xfer += iprot->readString(_val1518); } xfer += iprot->readMapEnd(); } @@ -22020,9 +22020,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1499; - xfer += iprot->readI32(ecast1499); - this->eventType = (PartitionEventType::type)ecast1499; + int32_t ecast1519; + 
xfer += iprot->readI32(ecast1519); + this->eventType = (PartitionEventType::type)ecast1519; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -22056,11 +22056,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1500; - for (_iter1500 = this->part_vals.begin(); _iter1500 != this->part_vals.end(); ++_iter1500) + std::map ::const_iterator _iter1520; + for (_iter1520 = this->part_vals.begin(); _iter1520 != this->part_vals.end(); ++_iter1520) { - xfer += oprot->writeString(_iter1500->first); - xfer += oprot->writeString(_iter1500->second); + xfer += oprot->writeString(_iter1520->first); + xfer += oprot->writeString(_iter1520->second); } xfer += oprot->writeMapEnd(); } @@ -22096,11 +22096,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1501; - for (_iter1501 = (*(this->part_vals)).begin(); _iter1501 != (*(this->part_vals)).end(); ++_iter1501) + std::map ::const_iterator _iter1521; + for (_iter1521 = (*(this->part_vals)).begin(); _iter1521 != (*(this->part_vals)).end(); ++_iter1521) { - xfer += oprot->writeString(_iter1501->first); - xfer += oprot->writeString(_iter1501->second); + xfer += oprot->writeString(_iter1521->first); + xfer += oprot->writeString(_iter1521->second); } xfer += oprot->writeMapEnd(); } @@ -23536,14 +23536,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1502; - ::apache::thrift::protocol::TType _etype1505; - xfer += iprot->readListBegin(_etype1505, _size1502); - this->success.resize(_size1502); - uint32_t _i1506; - for (_i1506 = 0; _i1506 < _size1502; ++_i1506) + uint32_t _size1522; + ::apache::thrift::protocol::TType _etype1525; + xfer += iprot->readListBegin(_etype1525, _size1522); + this->success.resize(_size1522); + uint32_t _i1526; + for (_i1526 = 0; _i1526 < _size1522; ++_i1526) { - xfer += this->success[_i1506].read(iprot); + xfer += this->success[_i1526].read(iprot); } xfer += iprot->readListEnd(); } @@ -23590,10 +23590,10 @@ uint32_t ThriftHiveMetastore_get_indexes_result::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1507; - for (_iter1507 = this->success.begin(); _iter1507 != this->success.end(); ++_iter1507) + std::vector ::const_iterator _iter1527; + for (_iter1527 = this->success.begin(); _iter1527 != this->success.end(); ++_iter1527) { - xfer += (*_iter1507).write(oprot); + xfer += (*_iter1527).write(oprot); } xfer += oprot->writeListEnd(); } @@ -23642,14 +23642,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1508; - ::apache::thrift::protocol::TType _etype1511; - xfer += 
iprot->readListBegin(_etype1511, _size1508); - (*(this->success)).resize(_size1508); - uint32_t _i1512; - for (_i1512 = 0; _i1512 < _size1508; ++_i1512) + uint32_t _size1528; + ::apache::thrift::protocol::TType _etype1531; + xfer += iprot->readListBegin(_etype1531, _size1528); + (*(this->success)).resize(_size1528); + uint32_t _i1532; + for (_i1532 = 0; _i1532 < _size1528; ++_i1532) { - xfer += (*(this->success))[_i1512].read(iprot); + xfer += (*(this->success))[_i1532].read(iprot); } xfer += iprot->readListEnd(); } @@ -23827,14 +23827,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1513; - ::apache::thrift::protocol::TType _etype1516; - xfer += iprot->readListBegin(_etype1516, _size1513); - this->success.resize(_size1513); - uint32_t _i1517; - for (_i1517 = 0; _i1517 < _size1513; ++_i1517) + uint32_t _size1533; + ::apache::thrift::protocol::TType _etype1536; + xfer += iprot->readListBegin(_etype1536, _size1533); + this->success.resize(_size1533); + uint32_t _i1537; + for (_i1537 = 0; _i1537 < _size1533; ++_i1537) { - xfer += iprot->readString(this->success[_i1517]); + xfer += iprot->readString(this->success[_i1537]); } xfer += iprot->readListEnd(); } @@ -23873,10 +23873,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1518; - for (_iter1518 = this->success.begin(); _iter1518 != this->success.end(); ++_iter1518) + std::vector ::const_iterator _iter1538; + for (_iter1538 = this->success.begin(); _iter1538 != this->success.end(); ++_iter1538) { - xfer += oprot->writeString((*_iter1518)); + xfer += oprot->writeString((*_iter1538)); } xfer += oprot->writeListEnd(); } @@ -23921,14 +23921,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1519; - ::apache::thrift::protocol::TType _etype1522; - xfer += iprot->readListBegin(_etype1522, _size1519); - (*(this->success)).resize(_size1519); - uint32_t _i1523; - for (_i1523 = 0; _i1523 < _size1519; ++_i1523) + uint32_t _size1539; + ::apache::thrift::protocol::TType _etype1542; + xfer += iprot->readListBegin(_etype1542, _size1539); + (*(this->success)).resize(_size1539); + uint32_t _i1543; + for (_i1543 = 0; _i1543 < _size1539; ++_i1543) { - xfer += iprot->readString((*(this->success))[_i1523]); + xfer += iprot->readString((*(this->success))[_i1543]); } xfer += iprot->readListEnd(); } @@ -28409,14 +28409,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1524; - ::apache::thrift::protocol::TType _etype1527; - xfer += iprot->readListBegin(_etype1527, _size1524); - this->success.resize(_size1524); - uint32_t _i1528; - for (_i1528 = 0; _i1528 < _size1524; ++_i1528) + uint32_t _size1544; + ::apache::thrift::protocol::TType _etype1547; + xfer += iprot->readListBegin(_etype1547, _size1544); + this->success.resize(_size1544); + uint32_t _i1548; + for (_i1548 = 0; _i1548 < _size1544; ++_i1548) { - xfer += iprot->readString(this->success[_i1528]); + xfer += iprot->readString(this->success[_i1548]); } xfer += 
iprot->readListEnd(); } @@ -28455,10 +28455,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1529; - for (_iter1529 = this->success.begin(); _iter1529 != this->success.end(); ++_iter1529) + std::vector ::const_iterator _iter1549; + for (_iter1549 = this->success.begin(); _iter1549 != this->success.end(); ++_iter1549) { - xfer += oprot->writeString((*_iter1529)); + xfer += oprot->writeString((*_iter1549)); } xfer += oprot->writeListEnd(); } @@ -28503,14 +28503,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1530; - ::apache::thrift::protocol::TType _etype1533; - xfer += iprot->readListBegin(_etype1533, _size1530); - (*(this->success)).resize(_size1530); - uint32_t _i1534; - for (_i1534 = 0; _i1534 < _size1530; ++_i1534) + uint32_t _size1550; + ::apache::thrift::protocol::TType _etype1553; + xfer += iprot->readListBegin(_etype1553, _size1550); + (*(this->success)).resize(_size1550); + uint32_t _i1554; + for (_i1554 = 0; _i1554 < _size1550; ++_i1554) { - xfer += iprot->readString((*(this->success))[_i1534]); + xfer += iprot->readString((*(this->success))[_i1554]); } xfer += iprot->readListEnd(); } @@ -29470,14 +29470,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1535; - ::apache::thrift::protocol::TType _etype1538; - xfer += iprot->readListBegin(_etype1538, _size1535); - this->success.resize(_size1535); - uint32_t _i1539; - for (_i1539 = 0; _i1539 < _size1535; ++_i1539) + uint32_t _size1555; + ::apache::thrift::protocol::TType _etype1558; + xfer += iprot->readListBegin(_etype1558, _size1555); + this->success.resize(_size1555); + uint32_t _i1559; + for (_i1559 = 0; _i1559 < _size1555; ++_i1559) { - xfer += iprot->readString(this->success[_i1539]); + xfer += iprot->readString(this->success[_i1559]); } xfer += iprot->readListEnd(); } @@ -29516,10 +29516,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1540; - for (_iter1540 = this->success.begin(); _iter1540 != this->success.end(); ++_iter1540) + std::vector ::const_iterator _iter1560; + for (_iter1560 = this->success.begin(); _iter1560 != this->success.end(); ++_iter1560) { - xfer += oprot->writeString((*_iter1540)); + xfer += oprot->writeString((*_iter1560)); } xfer += oprot->writeListEnd(); } @@ -29564,14 +29564,14 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1541; - ::apache::thrift::protocol::TType _etype1544; - xfer += iprot->readListBegin(_etype1544, _size1541); - (*(this->success)).resize(_size1541); - uint32_t _i1545; - for (_i1545 = 0; _i1545 < _size1541; ++_i1545) + uint32_t _size1561; + ::apache::thrift::protocol::TType _etype1564; + xfer += iprot->readListBegin(_etype1564, _size1561); + 
(*(this->success)).resize(_size1561); + uint32_t _i1565; + for (_i1565 = 0; _i1565 < _size1561; ++_i1565) { - xfer += iprot->readString((*(this->success))[_i1545]); + xfer += iprot->readString((*(this->success))[_i1565]); } xfer += iprot->readListEnd(); } @@ -29644,9 +29644,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1546; - xfer += iprot->readI32(ecast1546); - this->principal_type = (PrincipalType::type)ecast1546; + int32_t ecast1566; + xfer += iprot->readI32(ecast1566); + this->principal_type = (PrincipalType::type)ecast1566; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -29662,9 +29662,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1547; - xfer += iprot->readI32(ecast1547); - this->grantorType = (PrincipalType::type)ecast1547; + int32_t ecast1567; + xfer += iprot->readI32(ecast1567); + this->grantorType = (PrincipalType::type)ecast1567; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -29935,9 +29935,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1548; - xfer += iprot->readI32(ecast1548); - this->principal_type = (PrincipalType::type)ecast1548; + int32_t ecast1568; + xfer += iprot->readI32(ecast1568); + this->principal_type = (PrincipalType::type)ecast1568; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30168,9 +30168,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1549; - xfer += iprot->readI32(ecast1549); - this->principal_type = (PrincipalType::type)ecast1549; + int32_t ecast1569; + xfer += iprot->readI32(ecast1569); + this->principal_type = (PrincipalType::type)ecast1569; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30259,14 +30259,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1550; - ::apache::thrift::protocol::TType _etype1553; - xfer += iprot->readListBegin(_etype1553, _size1550); - this->success.resize(_size1550); - uint32_t _i1554; - for (_i1554 = 0; _i1554 < _size1550; ++_i1554) + uint32_t _size1570; + ::apache::thrift::protocol::TType _etype1573; + xfer += iprot->readListBegin(_etype1573, _size1570); + this->success.resize(_size1570); + uint32_t _i1574; + for (_i1574 = 0; _i1574 < _size1570; ++_i1574) { - xfer += this->success[_i1554].read(iprot); + xfer += this->success[_i1574].read(iprot); } xfer += iprot->readListEnd(); } @@ -30305,10 +30305,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1555; - for (_iter1555 = this->success.begin(); _iter1555 != this->success.end(); ++_iter1555) + std::vector ::const_iterator _iter1575; + for (_iter1575 = this->success.begin(); _iter1575 != this->success.end(); ++_iter1575) { - xfer += (*_iter1555).write(oprot); + xfer += (*_iter1575).write(oprot); } xfer += 
oprot->writeListEnd(); } @@ -30353,14 +30353,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1556; - ::apache::thrift::protocol::TType _etype1559; - xfer += iprot->readListBegin(_etype1559, _size1556); - (*(this->success)).resize(_size1556); - uint32_t _i1560; - for (_i1560 = 0; _i1560 < _size1556; ++_i1560) + uint32_t _size1576; + ::apache::thrift::protocol::TType _etype1579; + xfer += iprot->readListBegin(_etype1579, _size1576); + (*(this->success)).resize(_size1576); + uint32_t _i1580; + for (_i1580 = 0; _i1580 < _size1576; ++_i1580) { - xfer += (*(this->success))[_i1560].read(iprot); + xfer += (*(this->success))[_i1580].read(iprot); } xfer += iprot->readListEnd(); } @@ -31056,14 +31056,14 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1561; - ::apache::thrift::protocol::TType _etype1564; - xfer += iprot->readListBegin(_etype1564, _size1561); - this->group_names.resize(_size1561); - uint32_t _i1565; - for (_i1565 = 0; _i1565 < _size1561; ++_i1565) + uint32_t _size1581; + ::apache::thrift::protocol::TType _etype1584; + xfer += iprot->readListBegin(_etype1584, _size1581); + this->group_names.resize(_size1581); + uint32_t _i1585; + for (_i1585 = 0; _i1585 < _size1581; ++_i1585) { - xfer += iprot->readString(this->group_names[_i1565]); + xfer += iprot->readString(this->group_names[_i1585]); } xfer += iprot->readListEnd(); } @@ -31100,10 +31100,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1566; - for (_iter1566 = this->group_names.begin(); _iter1566 != this->group_names.end(); ++_iter1566) + std::vector ::const_iterator _iter1586; + for (_iter1586 = this->group_names.begin(); _iter1586 != this->group_names.end(); ++_iter1586) { - xfer += oprot->writeString((*_iter1566)); + xfer += oprot->writeString((*_iter1586)); } xfer += oprot->writeListEnd(); } @@ -31135,10 +31135,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1567; - for (_iter1567 = (*(this->group_names)).begin(); _iter1567 != (*(this->group_names)).end(); ++_iter1567) + std::vector ::const_iterator _iter1587; + for (_iter1587 = (*(this->group_names)).begin(); _iter1587 != (*(this->group_names)).end(); ++_iter1587) { - xfer += oprot->writeString((*_iter1567)); + xfer += oprot->writeString((*_iter1587)); } xfer += oprot->writeListEnd(); } @@ -31313,9 +31313,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1568; - xfer += iprot->readI32(ecast1568); - this->principal_type = (PrincipalType::type)ecast1568; + int32_t ecast1588; + xfer += iprot->readI32(ecast1588); + this->principal_type = (PrincipalType::type)ecast1588; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -31420,14 +31420,14 @@ 
uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1569; - ::apache::thrift::protocol::TType _etype1572; - xfer += iprot->readListBegin(_etype1572, _size1569); - this->success.resize(_size1569); - uint32_t _i1573; - for (_i1573 = 0; _i1573 < _size1569; ++_i1573) + uint32_t _size1589; + ::apache::thrift::protocol::TType _etype1592; + xfer += iprot->readListBegin(_etype1592, _size1589); + this->success.resize(_size1589); + uint32_t _i1593; + for (_i1593 = 0; _i1593 < _size1589; ++_i1593) { - xfer += this->success[_i1573].read(iprot); + xfer += this->success[_i1593].read(iprot); } xfer += iprot->readListEnd(); } @@ -31466,10 +31466,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1574; - for (_iter1574 = this->success.begin(); _iter1574 != this->success.end(); ++_iter1574) + std::vector ::const_iterator _iter1594; + for (_iter1594 = this->success.begin(); _iter1594 != this->success.end(); ++_iter1594) { - xfer += (*_iter1574).write(oprot); + xfer += (*_iter1594).write(oprot); } xfer += oprot->writeListEnd(); } @@ -31514,14 +31514,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1575; - ::apache::thrift::protocol::TType _etype1578; - xfer += iprot->readListBegin(_etype1578, _size1575); - (*(this->success)).resize(_size1575); - uint32_t _i1579; - for (_i1579 = 0; _i1579 < _size1575; ++_i1579) + uint32_t _size1595; + ::apache::thrift::protocol::TType _etype1598; + xfer += iprot->readListBegin(_etype1598, _size1595); + (*(this->success)).resize(_size1595); + uint32_t _i1599; + for (_i1599 = 0; _i1599 < _size1595; ++_i1599) { - xfer += (*(this->success))[_i1579].read(iprot); + xfer += (*(this->success))[_i1599].read(iprot); } xfer += iprot->readListEnd(); } @@ -32209,14 +32209,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1580; - ::apache::thrift::protocol::TType _etype1583; - xfer += iprot->readListBegin(_etype1583, _size1580); - this->group_names.resize(_size1580); - uint32_t _i1584; - for (_i1584 = 0; _i1584 < _size1580; ++_i1584) + uint32_t _size1600; + ::apache::thrift::protocol::TType _etype1603; + xfer += iprot->readListBegin(_etype1603, _size1600); + this->group_names.resize(_size1600); + uint32_t _i1604; + for (_i1604 = 0; _i1604 < _size1600; ++_i1604) { - xfer += iprot->readString(this->group_names[_i1584]); + xfer += iprot->readString(this->group_names[_i1604]); } xfer += iprot->readListEnd(); } @@ -32249,10 +32249,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1585; - for (_iter1585 = this->group_names.begin(); _iter1585 != this->group_names.end(); ++_iter1585) + std::vector ::const_iterator _iter1605; + for (_iter1605 = this->group_names.begin(); _iter1605 != 
this->group_names.end(); ++_iter1605) { - xfer += oprot->writeString((*_iter1585)); + xfer += oprot->writeString((*_iter1605)); } xfer += oprot->writeListEnd(); } @@ -32280,10 +32280,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1586; - for (_iter1586 = (*(this->group_names)).begin(); _iter1586 != (*(this->group_names)).end(); ++_iter1586) + std::vector ::const_iterator _iter1606; + for (_iter1606 = (*(this->group_names)).begin(); _iter1606 != (*(this->group_names)).end(); ++_iter1606) { - xfer += oprot->writeString((*_iter1586)); + xfer += oprot->writeString((*_iter1606)); } xfer += oprot->writeListEnd(); } @@ -32324,14 +32324,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1587; - ::apache::thrift::protocol::TType _etype1590; - xfer += iprot->readListBegin(_etype1590, _size1587); - this->success.resize(_size1587); - uint32_t _i1591; - for (_i1591 = 0; _i1591 < _size1587; ++_i1591) + uint32_t _size1607; + ::apache::thrift::protocol::TType _etype1610; + xfer += iprot->readListBegin(_etype1610, _size1607); + this->success.resize(_size1607); + uint32_t _i1611; + for (_i1611 = 0; _i1611 < _size1607; ++_i1611) { - xfer += iprot->readString(this->success[_i1591]); + xfer += iprot->readString(this->success[_i1611]); } xfer += iprot->readListEnd(); } @@ -32370,10 +32370,10 @@ uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1592; - for (_iter1592 = this->success.begin(); _iter1592 != this->success.end(); ++_iter1592) + std::vector ::const_iterator _iter1612; + for (_iter1612 = this->success.begin(); _iter1612 != this->success.end(); ++_iter1612) { - xfer += oprot->writeString((*_iter1592)); + xfer += oprot->writeString((*_iter1612)); } xfer += oprot->writeListEnd(); } @@ -32418,14 +32418,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1593; - ::apache::thrift::protocol::TType _etype1596; - xfer += iprot->readListBegin(_etype1596, _size1593); - (*(this->success)).resize(_size1593); - uint32_t _i1597; - for (_i1597 = 0; _i1597 < _size1593; ++_i1597) + uint32_t _size1613; + ::apache::thrift::protocol::TType _etype1616; + xfer += iprot->readListBegin(_etype1616, _size1613); + (*(this->success)).resize(_size1613); + uint32_t _i1617; + for (_i1617 = 0; _i1617 < _size1613; ++_i1617) { - xfer += iprot->readString((*(this->success))[_i1597]); + xfer += iprot->readString((*(this->success))[_i1617]); } xfer += iprot->readListEnd(); } @@ -33736,14 +33736,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1598; - ::apache::thrift::protocol::TType _etype1601; - xfer += iprot->readListBegin(_etype1601, _size1598); - this->success.resize(_size1598); - uint32_t _i1602; - for (_i1602 = 0; _i1602 < 
_size1598; ++_i1602) + uint32_t _size1618; + ::apache::thrift::protocol::TType _etype1621; + xfer += iprot->readListBegin(_etype1621, _size1618); + this->success.resize(_size1618); + uint32_t _i1622; + for (_i1622 = 0; _i1622 < _size1618; ++_i1622) { - xfer += iprot->readString(this->success[_i1602]); + xfer += iprot->readString(this->success[_i1622]); } xfer += iprot->readListEnd(); } @@ -33774,10 +33774,10 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1603; - for (_iter1603 = this->success.begin(); _iter1603 != this->success.end(); ++_iter1603) + std::vector ::const_iterator _iter1623; + for (_iter1623 = this->success.begin(); _iter1623 != this->success.end(); ++_iter1623) { - xfer += oprot->writeString((*_iter1603)); + xfer += oprot->writeString((*_iter1623)); } xfer += oprot->writeListEnd(); } @@ -33818,14 +33818,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1604; - ::apache::thrift::protocol::TType _etype1607; - xfer += iprot->readListBegin(_etype1607, _size1604); - (*(this->success)).resize(_size1604); - uint32_t _i1608; - for (_i1608 = 0; _i1608 < _size1604; ++_i1608) + uint32_t _size1624; + ::apache::thrift::protocol::TType _etype1627; + xfer += iprot->readListBegin(_etype1627, _size1624); + (*(this->success)).resize(_size1624); + uint32_t _i1628; + for (_i1628 = 0; _i1628 < _size1624; ++_i1628) { - xfer += iprot->readString((*(this->success))[_i1608]); + xfer += iprot->readString((*(this->success))[_i1628]); } xfer += iprot->readListEnd(); } @@ -34551,14 +34551,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1609; - ::apache::thrift::protocol::TType _etype1612; - xfer += iprot->readListBegin(_etype1612, _size1609); - this->success.resize(_size1609); - uint32_t _i1613; - for (_i1613 = 0; _i1613 < _size1609; ++_i1613) + uint32_t _size1629; + ::apache::thrift::protocol::TType _etype1632; + xfer += iprot->readListBegin(_etype1632, _size1629); + this->success.resize(_size1629); + uint32_t _i1633; + for (_i1633 = 0; _i1633 < _size1629; ++_i1633) { - xfer += iprot->readString(this->success[_i1613]); + xfer += iprot->readString(this->success[_i1633]); } xfer += iprot->readListEnd(); } @@ -34589,10 +34589,10 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1614; - for (_iter1614 = this->success.begin(); _iter1614 != this->success.end(); ++_iter1614) + std::vector ::const_iterator _iter1634; + for (_iter1634 = this->success.begin(); _iter1634 != this->success.end(); ++_iter1634) { - xfer += oprot->writeString((*_iter1614)); + xfer += oprot->writeString((*_iter1634)); } xfer += oprot->writeListEnd(); } @@ -34633,14 +34633,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1615; - 
::apache::thrift::protocol::TType _etype1618; - xfer += iprot->readListBegin(_etype1618, _size1615); - (*(this->success)).resize(_size1615); - uint32_t _i1619; - for (_i1619 = 0; _i1619 < _size1615; ++_i1619) + uint32_t _size1635; + ::apache::thrift::protocol::TType _etype1638; + xfer += iprot->readListBegin(_etype1638, _size1635); + (*(this->success)).resize(_size1635); + uint32_t _i1639; + for (_i1639 = 0; _i1639 < _size1635; ++_i1639) { - xfer += iprot->readString((*(this->success))[_i1619]); + xfer += iprot->readString((*(this->success))[_i1639]); } xfer += iprot->readListEnd(); } @@ -42567,1038 +42567,1314 @@ uint32_t ThriftHiveMetastore_get_triggers_for_resourceplan_presult::read(::apach return xfer; } -void ThriftHiveMetastoreClient::getMetaConf(std::string& _return, const std::string& key) -{ - send_getMetaConf(key); - recv_getMetaConf(_return); + +ThriftHiveMetastore_create_or_update_wm_pool_args::~ThriftHiveMetastore_create_or_update_wm_pool_args() throw() { } -void ThriftHiveMetastoreClient::send_getMetaConf(const std::string& key) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("getMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_getMetaConf_pargs args; - args.key = &key; - args.write(oprot_); +uint32_t ThriftHiveMetastore_create_or_update_wm_pool_args::read(::apache::thrift::protocol::TProtocol* iprot) { - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); -} + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; -void ThriftHiveMetastoreClient::recv_getMetaConf(std::string& _return) -{ + xfer += iprot->readStructBegin(fname); - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + using ::apache::thrift::protocol::TProtocolException; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("getMetaConf") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_getMetaConf_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.o1) { - throw result.o1; + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->request.read(iprot); + this->__isset.request = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getMetaConf failed: unknown result"); -} -void ThriftHiveMetastoreClient::setMetaConf(const std::string& key, const std::string& value) -{ - send_setMetaConf(key, value); - recv_setMetaConf(); + xfer += 
iprot->readStructEnd(); + + return xfer; } -void ThriftHiveMetastoreClient::send_setMetaConf(const std::string& key, const std::string& value) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("setMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); +uint32_t ThriftHiveMetastore_create_or_update_wm_pool_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_or_update_wm_pool_args"); - ThriftHiveMetastore_setMetaConf_pargs args; - args.key = &key; - args.value = &value; - args.write(oprot_); + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->request.write(oprot); + xfer += oprot->writeFieldEnd(); - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; } -void ThriftHiveMetastoreClient::recv_setMetaConf() -{ - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; +ThriftHiveMetastore_create_or_update_wm_pool_pargs::~ThriftHiveMetastore_create_or_update_wm_pool_pargs() throw() { +} - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("setMetaConf") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_setMetaConf_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - throw result.o1; - } - return; -} +uint32_t ThriftHiveMetastore_create_or_update_wm_pool_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_or_update_wm_pool_pargs"); -void ThriftHiveMetastoreClient::create_database(const Database& database) -{ - send_create_database(database); - recv_create_database(); -} + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->request)).write(oprot); + xfer += oprot->writeFieldEnd(); -void ThriftHiveMetastoreClient::send_create_database(const Database& database) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("create_database", ::apache::thrift::protocol::T_CALL, cseqid); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} - ThriftHiveMetastore_create_database_pargs args; - args.database = &database; - args.write(oprot_); - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); +ThriftHiveMetastore_create_or_update_wm_pool_result::~ThriftHiveMetastore_create_or_update_wm_pool_result() throw() { } -void ThriftHiveMetastoreClient::recv_create_database() -{ - int32_t rseqid = 0; +uint32_t ThriftHiveMetastore_create_or_update_wm_pool_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker 
tracker(*iprot); + uint32_t xfer = 0; std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + ::apache::thrift::protocol::TType ftype; + int16_t fid; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("create_database") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_create_database_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + xfer += iprot->readStructBegin(fname); - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - return; -} + using ::apache::thrift::protocol::TProtocolException; -void ThriftHiveMetastoreClient::get_database(Database& _return, const std::string& name) -{ - send_get_database(name); - recv_get_database(_return); -} -void ThriftHiveMetastoreClient::send_get_database(const std::string& name) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("get_database", ::apache::thrift::protocol::T_CALL, cseqid); + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } - ThriftHiveMetastore_get_database_pargs args; - args.name = &name; - args.write(oprot_); + xfer += iprot->readStructEnd(); - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); + return xfer; } -void ThriftHiveMetastoreClient::recv_get_database(Database& _return) -{ +uint32_t ThriftHiveMetastore_create_or_update_wm_pool_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + uint32_t xfer = 0; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_database") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_get_database_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_or_update_wm_pool_result"); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o4) { + xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += this->o4.write(oprot); + xfer += oprot->writeFieldEnd(); } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_database failed: unknown result"); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; } -void ThriftHiveMetastoreClient::drop_database(const std::string& name, const bool deleteData, const bool cascade) -{ - send_drop_database(name, deleteData, cascade); - recv_drop_database(); + +ThriftHiveMetastore_create_or_update_wm_pool_presult::~ThriftHiveMetastore_create_or_update_wm_pool_presult() throw() { } -void ThriftHiveMetastoreClient::send_drop_database(const std::string& name, const bool deleteData, const bool cascade) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_database_pargs args; - args.name = &name; - args.deleteData = &deleteData; - args.cascade = &cascade; - args.write(oprot_); +uint32_t ThriftHiveMetastore_create_or_update_wm_pool_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); -} + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; -void ThriftHiveMetastoreClient::recv_drop_database() -{ + xfer += iprot->readStructBegin(fname); - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + using ::apache::thrift::protocol::TProtocolException; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("drop_database") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_drop_database_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); } - return; -} -void ThriftHiveMetastoreClient::get_databases(std::vector & _return, const std::string& pattern) -{ - send_get_databases(pattern); - recv_get_databases(_return); -} + xfer += iprot->readStructEnd(); -void ThriftHiveMetastoreClient::send_get_databases(const std::string& pattern) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("get_databases", ::apache::thrift::protocol::T_CALL, cseqid); + return xfer; +} - ThriftHiveMetastore_get_databases_pargs args; - args.pattern = &pattern; - args.write(oprot_); - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); +ThriftHiveMetastore_drop_wm_pool_args::~ThriftHiveMetastore_drop_wm_pool_args() throw() { } -void ThriftHiveMetastoreClient::recv_get_databases(std::vector & _return) -{ - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; +uint32_t ThriftHiveMetastore_drop_wm_pool_args::read(::apache::thrift::protocol::TProtocol* iprot) { - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_databases") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_get_databases_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + 
uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.o1) { - throw result.o1; + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->request.read(iprot); + this->__isset.request = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_databases failed: unknown result"); -} -void ThriftHiveMetastoreClient::get_all_databases(std::vector & _return) -{ - send_get_all_databases(); - recv_get_all_databases(_return); + xfer += iprot->readStructEnd(); + + return xfer; } -void ThriftHiveMetastoreClient::send_get_all_databases() -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("get_all_databases", ::apache::thrift::protocol::T_CALL, cseqid); +uint32_t ThriftHiveMetastore_drop_wm_pool_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_wm_pool_args"); - ThriftHiveMetastore_get_all_databases_pargs args; - args.write(oprot_); + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->request.write(oprot); + xfer += oprot->writeFieldEnd(); - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; } -void ThriftHiveMetastoreClient::recv_get_all_databases(std::vector & _return) -{ - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; +ThriftHiveMetastore_drop_wm_pool_pargs::~ThriftHiveMetastore_drop_wm_pool_pargs() throw() { +} - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_all_databases") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_get_all_databases_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.o1) { - throw result.o1; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_databases failed: unknown result"); -} +uint32_t ThriftHiveMetastore_drop_wm_pool_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += 
oprot->writeStructBegin("ThriftHiveMetastore_drop_wm_pool_pargs"); -void ThriftHiveMetastoreClient::alter_database(const std::string& dbname, const Database& db) -{ - send_alter_database(dbname, db); - recv_alter_database(); -} + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->request)).write(oprot); + xfer += oprot->writeFieldEnd(); -void ThriftHiveMetastoreClient::send_alter_database(const std::string& dbname, const Database& db) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("alter_database", ::apache::thrift::protocol::T_CALL, cseqid); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} - ThriftHiveMetastore_alter_database_pargs args; - args.dbname = &dbname; - args.db = &db; - args.write(oprot_); - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); +ThriftHiveMetastore_drop_wm_pool_result::~ThriftHiveMetastore_drop_wm_pool_result() throw() { } -void ThriftHiveMetastoreClient::recv_alter_database() -{ - int32_t rseqid = 0; +uint32_t ThriftHiveMetastore_drop_wm_pool_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + ::apache::thrift::protocol::TType ftype; + int16_t fid; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("alter_database") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_alter_database_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + xfer += iprot->readStructBegin(fname); - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); } - return; -} -void ThriftHiveMetastoreClient::get_type(Type& _return, const std::string& name) -{ - send_get_type(name); - recv_get_type(_return); + xfer += iprot->readStructEnd(); + + return xfer; } -void 
ThriftHiveMetastoreClient::send_get_type(const std::string& name) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("get_type", ::apache::thrift::protocol::T_CALL, cseqid); +uint32_t ThriftHiveMetastore_drop_wm_pool_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - ThriftHiveMetastore_get_type_pargs args; - args.name = &name; - args.write(oprot_); + uint32_t xfer = 0; - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_wm_pool_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; } -void ThriftHiveMetastoreClient::recv_get_type(Type& _return) -{ - int32_t rseqid = 0; +ThriftHiveMetastore_drop_wm_pool_presult::~ThriftHiveMetastore_drop_wm_pool_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_drop_wm_pool_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + ::apache::thrift::protocol::TType ftype; + int16_t fid; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_type") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_get_type_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + xfer += iprot->readStructBegin(fname); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += 
this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type failed: unknown result"); -} -bool ThriftHiveMetastoreClient::create_type(const Type& type) -{ - send_create_type(type); - return recv_create_type(); -} + xfer += iprot->readStructEnd(); -void ThriftHiveMetastoreClient::send_create_type(const Type& type) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("create_type", ::apache::thrift::protocol::T_CALL, cseqid); + return xfer; +} - ThriftHiveMetastore_create_type_pargs args; - args.type = &type; - args.write(oprot_); - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); +ThriftHiveMetastore_create_or_update_wm_mapping_args::~ThriftHiveMetastore_create_or_update_wm_mapping_args() throw() { } -bool ThriftHiveMetastoreClient::recv_create_type() -{ - int32_t rseqid = 0; +uint32_t ThriftHiveMetastore_create_or_update_wm_mapping_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + ::apache::thrift::protocol::TType ftype; + int16_t fid; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("create_type") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - bool _return; - ThriftHiveMetastore_create_type_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + xfer += iprot->readStructBegin(fname); - if (result.__isset.success) { - return _return; - } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->request.read(iprot); + this->__isset.request = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_type failed: unknown result"); + + xfer += iprot->readStructEnd(); + + return xfer; } -bool ThriftHiveMetastoreClient::drop_type(const std::string& type) -{ - send_drop_type(type); - return recv_drop_type(); +uint32_t ThriftHiveMetastore_create_or_update_wm_mapping_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + 
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_or_update_wm_mapping_args");
+
+  xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->request.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
 }
-void ThriftHiveMetastoreClient::send_drop_type(const std::string& type)
-{
-  int32_t cseqid = 0;
-  oprot_->writeMessageBegin("drop_type", ::apache::thrift::protocol::T_CALL, cseqid);
-  ThriftHiveMetastore_drop_type_pargs args;
-  args.type = &type;
-  args.write(oprot_);
+ThriftHiveMetastore_create_or_update_wm_mapping_pargs::~ThriftHiveMetastore_create_or_update_wm_mapping_pargs() throw() {
+}
-  oprot_->writeMessageEnd();
-  oprot_->getTransport()->writeEnd();
-  oprot_->getTransport()->flush();
+
+uint32_t ThriftHiveMetastore_create_or_update_wm_mapping_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_or_update_wm_mapping_pargs");
+
+  xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += (*(this->request)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
 }
-bool ThriftHiveMetastoreClient::recv_drop_type()
-{
-  int32_t rseqid = 0;
+ThriftHiveMetastore_create_or_update_wm_mapping_result::~ThriftHiveMetastore_create_or_update_wm_mapping_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_create_or_update_wm_mapping_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
   std::string fname;
-  ::apache::thrift::protocol::TMessageType mtype;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
-  iprot_->readMessageBegin(fname, mtype, rseqid);
-  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
-    ::apache::thrift::TApplicationException x;
-    x.read(iprot_);
-    iprot_->readMessageEnd();
-    iprot_->getTransport()->readEnd();
-    throw x;
-  }
-  if (mtype != ::apache::thrift::protocol::T_REPLY) {
-    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
-    iprot_->readMessageEnd();
-    iprot_->getTransport()->readEnd();
-  }
-  if (fname.compare("drop_type") != 0) {
-    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
-    iprot_->readMessageEnd();
-    iprot_->getTransport()->readEnd();
-  }
-  bool _return;
-  ThriftHiveMetastore_drop_type_presult result;
-  result.success = &_return;
-  result.read(iprot_);
-  iprot_->readMessageEnd();
-  iprot_->getTransport()->readEnd();
+  xfer += iprot->readStructBegin(fname);
-  if (result.__isset.success) {
-    return _return;
-  }
-  if (result.__isset.o1) {
-    throw result.o1;
-  }
-  if (result.__isset.o2) {
-    throw result.o2;
-  }
-  throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_type failed: unknown result");
-}
+  using ::apache::thrift::protocol::TProtocolException;
-void ThriftHiveMetastoreClient::get_type_all(std::map<std::string, Type> & _return, const std::string& name)
-{
-  send_get_type_all(name);
-  recv_get_type_all(_return);
-}
-void ThriftHiveMetastoreClient::send_get_type_all(const std::string& name)
-{
-  int32_t cseqid = 0;
-  oprot_->writeMessageBegin("get_type_all", ::apache::thrift::protocol::T_CALL, cseqid);
+  while
(true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } - ThriftHiveMetastore_get_type_all_pargs args; - args.name = &name; - args.write(oprot_); + xfer += iprot->readStructEnd(); - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); + return xfer; } -void ThriftHiveMetastoreClient::recv_get_type_all(std::map & _return) -{ +uint32_t ThriftHiveMetastore_create_or_update_wm_mapping_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + uint32_t xfer = 0; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_type_all") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_get_type_all_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_or_update_wm_mapping_result"); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.o2) { - throw result.o2; + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o4) { + xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += 
this->o4.write(oprot); + xfer += oprot->writeFieldEnd(); } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type_all failed: unknown result"); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; } -void ThriftHiveMetastoreClient::get_fields(std::vector & _return, const std::string& db_name, const std::string& table_name) -{ - send_get_fields(db_name, table_name); - recv_get_fields(_return); + +ThriftHiveMetastore_create_or_update_wm_mapping_presult::~ThriftHiveMetastore_create_or_update_wm_mapping_presult() throw() { } -void ThriftHiveMetastoreClient::send_get_fields(const std::string& db_name, const std::string& table_name) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("get_fields", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_fields_pargs args; - args.db_name = &db_name; - args.table_name = &table_name; - args.write(oprot_); +uint32_t ThriftHiveMetastore_create_or_update_wm_mapping_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); -} + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; -void ThriftHiveMetastoreClient::recv_get_fields(std::vector & _return) -{ + xfer += iprot->readStructBegin(fname); - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + using ::apache::thrift::protocol::TProtocolException; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_fields") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_get_fields_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == 
::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields failed: unknown result"); -} -void ThriftHiveMetastoreClient::get_fields_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) -{ - send_get_fields_with_environment_context(db_name, table_name, environment_context); - recv_get_fields_with_environment_context(_return); -} + xfer += iprot->readStructEnd(); -void ThriftHiveMetastoreClient::send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("get_fields_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + return xfer; +} - ThriftHiveMetastore_get_fields_with_environment_context_pargs args; - args.db_name = &db_name; - args.table_name = &table_name; - args.environment_context = &environment_context; - args.write(oprot_); - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); +ThriftHiveMetastore_drop_wm_mapping_args::~ThriftHiveMetastore_drop_wm_mapping_args() throw() { } -void ThriftHiveMetastoreClient::recv_get_fields_with_environment_context(std::vector & _return) -{ - int32_t rseqid = 0; +uint32_t ThriftHiveMetastore_drop_wm_mapping_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + ::apache::thrift::protocol::TType ftype; + int16_t fid; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_fields_with_environment_context") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_get_fields_with_environment_context_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + xfer += iprot->readStructBegin(fname); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->request.read(iprot); + this->__isset.request = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); } - throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields_with_environment_context failed: unknown result"); -} -void ThriftHiveMetastoreClient::get_schema(std::vector & _return, const std::string& db_name, const std::string& table_name) -{ - send_get_schema(db_name, table_name); - recv_get_schema(_return); + xfer += iprot->readStructEnd(); + + return xfer; } -void ThriftHiveMetastoreClient::send_get_schema(const std::string& db_name, const std::string& table_name) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("get_schema", ::apache::thrift::protocol::T_CALL, cseqid); +uint32_t ThriftHiveMetastore_drop_wm_mapping_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_wm_mapping_args"); - ThriftHiveMetastore_get_schema_pargs args; - args.db_name = &db_name; - args.table_name = &table_name; - args.write(oprot_); + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->request.write(oprot); + xfer += oprot->writeFieldEnd(); - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; } -void ThriftHiveMetastoreClient::recv_get_schema(std::vector & _return) -{ - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; +ThriftHiveMetastore_drop_wm_mapping_pargs::~ThriftHiveMetastore_drop_wm_mapping_pargs() throw() { +} - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_schema") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_get_schema_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema failed: unknown result"); -} +uint32_t ThriftHiveMetastore_drop_wm_mapping_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_wm_mapping_pargs"); -void ThriftHiveMetastoreClient::get_schema_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) -{ - send_get_schema_with_environment_context(db_name, table_name, environment_context); - recv_get_schema_with_environment_context(_return); -} + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->request)).write(oprot); 
+ xfer += oprot->writeFieldEnd(); -void ThriftHiveMetastoreClient::send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("get_schema_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} - ThriftHiveMetastore_get_schema_with_environment_context_pargs args; - args.db_name = &db_name; - args.table_name = &table_name; - args.environment_context = &environment_context; - args.write(oprot_); - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); +ThriftHiveMetastore_drop_wm_mapping_result::~ThriftHiveMetastore_drop_wm_mapping_result() throw() { } -void ThriftHiveMetastoreClient::recv_get_schema_with_environment_context(std::vector & _return) -{ - int32_t rseqid = 0; +uint32_t ThriftHiveMetastore_drop_wm_mapping_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + ::apache::thrift::protocol::TType ftype; + int16_t fid; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_schema_with_environment_context") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_get_schema_with_environment_context_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + xfer += iprot->readStructBegin(fname); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); } - if (result.__isset.o3) { - throw result.o3; + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t 
ThriftHiveMetastore_drop_wm_mapping_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_wm_mapping_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema_with_environment_context failed: unknown result"); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; } -void ThriftHiveMetastoreClient::create_table(const Table& tbl) + +ThriftHiveMetastore_drop_wm_mapping_presult::~ThriftHiveMetastore_drop_wm_mapping_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_drop_wm_mapping_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args::~ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args() throw() { +} + + +uint32_t ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == 
::apache::thrift::protocol::T_STRUCT) { + xfer += this->request.read(iprot); + this->__isset.request = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args"); + + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->request.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_pargs::~ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_pargs"); + + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->request)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result::~ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result() throw() { +} + + +uint32_t ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result::write(::apache::thrift::protocol::TProtocol* 
oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o4) { + xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += this->o4.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult::~ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +void ThriftHiveMetastoreClient::getMetaConf(std::string& _return, const std::string& key) { - send_create_table(tbl); - recv_create_table(); + send_getMetaConf(key); + recv_getMetaConf(_return); } -void ThriftHiveMetastoreClient::send_create_table(const Table& tbl) +void ThriftHiveMetastoreClient::send_getMetaConf(const std::string& key) { int32_t cseqid = 0; - oprot_->writeMessageBegin("create_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("getMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_table_pargs args; - args.tbl = &tbl; + 
ThriftHiveMetastore_getMetaConf_pargs args; + args.key = &key; args.write(oprot_); oprot_->writeMessageEnd(); @@ -43606,7 +43882,7 @@ void ThriftHiveMetastoreClient::send_create_table(const Table& tbl) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_create_table() +void ThriftHiveMetastoreClient::recv_getMetaConf(std::string& _return) { int32_t rseqid = 0; @@ -43626,45 +43902,41 @@ void ThriftHiveMetastoreClient::recv_create_table() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_table") != 0) { + if (fname.compare("getMetaConf") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_create_table_presult result; + ThriftHiveMetastore_getMetaConf_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - if (result.__isset.o4) { - throw result.o4; - } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getMetaConf failed: unknown result"); } -void ThriftHiveMetastoreClient::create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::setMetaConf(const std::string& key, const std::string& value) { - send_create_table_with_environment_context(tbl, environment_context); - recv_create_table_with_environment_context(); + send_setMetaConf(key, value); + recv_setMetaConf(); } -void ThriftHiveMetastoreClient::send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::send_setMetaConf(const std::string& key, const std::string& value) { int32_t cseqid = 0; - oprot_->writeMessageBegin("create_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("setMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_table_with_environment_context_pargs args; - args.tbl = &tbl; - args.environment_context = &environment_context; + ThriftHiveMetastore_setMetaConf_pargs args; + args.key = &key; + args.value = &value; args.write(oprot_); oprot_->writeMessageEnd(); @@ -43672,7 +43944,7 @@ void ThriftHiveMetastoreClient::send_create_table_with_environment_context(const oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_create_table_with_environment_context() +void ThriftHiveMetastoreClient::recv_setMetaConf() { int32_t rseqid = 0; @@ -43692,12 +43964,12 @@ void ThriftHiveMetastoreClient::recv_create_table_with_environment_context() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_table_with_environment_context") != 0) { + if (fname.compare("setMetaConf") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_create_table_with_environment_context_presult result; + ThriftHiveMetastore_setMetaConf_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -43705,35 +43977,22 @@ void ThriftHiveMetastoreClient::recv_create_table_with_environment_context() if (result.__isset.o1) { throw 
result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - if (result.__isset.o4) { - throw result.o4; - } return; } -void ThriftHiveMetastoreClient::create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints) +void ThriftHiveMetastoreClient::create_database(const Database& database) { - send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints); - recv_create_table_with_constraints(); + send_create_database(database); + recv_create_database(); } -void ThriftHiveMetastoreClient::send_create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints) +void ThriftHiveMetastoreClient::send_create_database(const Database& database) { int32_t cseqid = 0; - oprot_->writeMessageBegin("create_table_with_constraints", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_database", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_table_with_constraints_pargs args; - args.tbl = &tbl; - args.primaryKeys = &primaryKeys; - args.foreignKeys = &foreignKeys; - args.uniqueConstraints = &uniqueConstraints; - args.notNullConstraints = ¬NullConstraints; + ThriftHiveMetastore_create_database_pargs args; + args.database = &database; args.write(oprot_); oprot_->writeMessageEnd(); @@ -43741,7 +44000,7 @@ void ThriftHiveMetastoreClient::send_create_table_with_constraints(const Table& oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_create_table_with_constraints() +void ThriftHiveMetastoreClient::recv_create_database() { int32_t rseqid = 0; @@ -43761,12 +44020,12 @@ void ThriftHiveMetastoreClient::recv_create_table_with_constraints() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_table_with_constraints") != 0) { + if (fname.compare("create_database") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_create_table_with_constraints_presult result; + ThriftHiveMetastore_create_database_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -43780,25 +44039,22 @@ void ThriftHiveMetastoreClient::recv_create_table_with_constraints() if (result.__isset.o3) { throw result.o3; } - if (result.__isset.o4) { - throw result.o4; - } return; } -void ThriftHiveMetastoreClient::drop_constraint(const DropConstraintRequest& req) +void ThriftHiveMetastoreClient::get_database(Database& _return, const std::string& name) { - send_drop_constraint(req); - recv_drop_constraint(); + send_get_database(name); + recv_get_database(_return); } -void ThriftHiveMetastoreClient::send_drop_constraint(const DropConstraintRequest& req) +void ThriftHiveMetastoreClient::send_get_database(const std::string& name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("drop_constraint", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_database", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_constraint_pargs args; - args.req = &req; + ThriftHiveMetastore_get_database_pargs args; + args.name = &name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -43806,7 +44062,7 @@ void 
ThriftHiveMetastoreClient::send_drop_constraint(const DropConstraintRequest oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_drop_constraint() +void ThriftHiveMetastoreClient::recv_get_database(Database& _return) { int32_t rseqid = 0; @@ -43826,38 +44082,45 @@ void ThriftHiveMetastoreClient::recv_drop_constraint() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_constraint") != 0) { + if (fname.compare("get_database") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_drop_constraint_presult result; + ThriftHiveMetastore_get_database_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o3) { - throw result.o3; + if (result.__isset.o2) { + throw result.o2; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_database failed: unknown result"); } -void ThriftHiveMetastoreClient::add_primary_key(const AddPrimaryKeyRequest& req) +void ThriftHiveMetastoreClient::drop_database(const std::string& name, const bool deleteData, const bool cascade) { - send_add_primary_key(req); - recv_add_primary_key(); + send_drop_database(name, deleteData, cascade); + recv_drop_database(); } -void ThriftHiveMetastoreClient::send_add_primary_key(const AddPrimaryKeyRequest& req) +void ThriftHiveMetastoreClient::send_drop_database(const std::string& name, const bool deleteData, const bool cascade) { int32_t cseqid = 0; - oprot_->writeMessageBegin("add_primary_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_primary_key_pargs args; - args.req = &req; + ThriftHiveMetastore_drop_database_pargs args; + args.name = &name; + args.deleteData = &deleteData; + args.cascade = &cascade; args.write(oprot_); oprot_->writeMessageEnd(); @@ -43865,7 +44128,7 @@ void ThriftHiveMetastoreClient::send_add_primary_key(const AddPrimaryKeyRequest& oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_add_primary_key() +void ThriftHiveMetastoreClient::recv_drop_database() { int32_t rseqid = 0; @@ -43885,12 +44148,12 @@ void ThriftHiveMetastoreClient::recv_add_primary_key() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_primary_key") != 0) { + if (fname.compare("drop_database") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_add_primary_key_presult result; + ThriftHiveMetastore_drop_database_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -43901,22 +44164,25 @@ void ThriftHiveMetastoreClient::recv_add_primary_key() if (result.__isset.o2) { throw result.o2; } + if (result.__isset.o3) { + throw result.o3; + } return; } -void ThriftHiveMetastoreClient::add_foreign_key(const AddForeignKeyRequest& req) +void ThriftHiveMetastoreClient::get_databases(std::vector & _return, const std::string& pattern) { - send_add_foreign_key(req); - recv_add_foreign_key(); + send_get_databases(pattern); + recv_get_databases(_return); } -void ThriftHiveMetastoreClient::send_add_foreign_key(const 
AddForeignKeyRequest& req) +void ThriftHiveMetastoreClient::send_get_databases(const std::string& pattern) { int32_t cseqid = 0; - oprot_->writeMessageBegin("add_foreign_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_databases", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_foreign_key_pargs args; - args.req = &req; + ThriftHiveMetastore_get_databases_pargs args; + args.pattern = &pattern; args.write(oprot_); oprot_->writeMessageEnd(); @@ -43924,7 +44190,7 @@ void ThriftHiveMetastoreClient::send_add_foreign_key(const AddForeignKeyRequest& oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_add_foreign_key() +void ThriftHiveMetastoreClient::recv_get_databases(std::vector & _return) { int32_t rseqid = 0; @@ -43944,38 +44210,39 @@ void ThriftHiveMetastoreClient::recv_add_foreign_key() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_foreign_key") != 0) { + if (fname.compare("get_databases") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_add_foreign_key_presult result; + ThriftHiveMetastore_get_databases_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_databases failed: unknown result"); } -void ThriftHiveMetastoreClient::add_unique_constraint(const AddUniqueConstraintRequest& req) +void ThriftHiveMetastoreClient::get_all_databases(std::vector & _return) { - send_add_unique_constraint(req); - recv_add_unique_constraint(); + send_get_all_databases(); + recv_get_all_databases(_return); } -void ThriftHiveMetastoreClient::send_add_unique_constraint(const AddUniqueConstraintRequest& req) +void ThriftHiveMetastoreClient::send_get_all_databases() { int32_t cseqid = 0; - oprot_->writeMessageBegin("add_unique_constraint", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_databases", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_unique_constraint_pargs args; - args.req = &req; + ThriftHiveMetastore_get_all_databases_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -43983,7 +44250,7 @@ void ThriftHiveMetastoreClient::send_add_unique_constraint(const AddUniqueConstr oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_add_unique_constraint() +void ThriftHiveMetastoreClient::recv_get_all_databases(std::vector & _return) { int32_t rseqid = 0; @@ -44003,38 +44270,41 @@ void ThriftHiveMetastoreClient::recv_add_unique_constraint() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_unique_constraint") != 0) { + if (fname.compare("get_all_databases") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_add_unique_constraint_presult result; + ThriftHiveMetastore_get_all_databases_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if 
(result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_databases failed: unknown result"); } -void ThriftHiveMetastoreClient::add_not_null_constraint(const AddNotNullConstraintRequest& req) +void ThriftHiveMetastoreClient::alter_database(const std::string& dbname, const Database& db) { - send_add_not_null_constraint(req); - recv_add_not_null_constraint(); + send_alter_database(dbname, db); + recv_alter_database(); } -void ThriftHiveMetastoreClient::send_add_not_null_constraint(const AddNotNullConstraintRequest& req) +void ThriftHiveMetastoreClient::send_alter_database(const std::string& dbname, const Database& db) { int32_t cseqid = 0; - oprot_->writeMessageBegin("add_not_null_constraint", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_database", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_not_null_constraint_pargs args; - args.req = &req; + ThriftHiveMetastore_alter_database_pargs args; + args.dbname = &dbname; + args.db = &db; args.write(oprot_); oprot_->writeMessageEnd(); @@ -44042,7 +44312,7 @@ void ThriftHiveMetastoreClient::send_add_not_null_constraint(const AddNotNullCon oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_add_not_null_constraint() +void ThriftHiveMetastoreClient::recv_alter_database() { int32_t rseqid = 0; @@ -44062,12 +44332,12 @@ void ThriftHiveMetastoreClient::recv_add_not_null_constraint() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_not_null_constraint") != 0) { + if (fname.compare("alter_database") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_add_not_null_constraint_presult result; + ThriftHiveMetastore_alter_database_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -44081,21 +44351,19 @@ void ThriftHiveMetastoreClient::recv_add_not_null_constraint() return; } -void ThriftHiveMetastoreClient::drop_table(const std::string& dbname, const std::string& name, const bool deleteData) +void ThriftHiveMetastoreClient::get_type(Type& _return, const std::string& name) { - send_drop_table(dbname, name, deleteData); - recv_drop_table(); + send_get_type(name); + recv_get_type(_return); } -void ThriftHiveMetastoreClient::send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData) +void ThriftHiveMetastoreClient::send_get_type(const std::string& name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("drop_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_type", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_table_pargs args; - args.dbname = &dbname; + ThriftHiveMetastore_get_type_pargs args; args.name = &name; - args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -44103,7 +44371,7 @@ void ThriftHiveMetastoreClient::send_drop_table(const std::string& dbname, const oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_drop_table() +void ThriftHiveMetastoreClient::recv_get_type(Type& _return) { int32_t rseqid = 0; @@ -44123,41 +44391,43 @@ void ThriftHiveMetastoreClient::recv_drop_table() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_table") != 0) { + if 
(fname.compare("get_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_drop_table_presult result; + ThriftHiveMetastore_get_type_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o3) { - throw result.o3; + if (result.__isset.o2) { + throw result.o2; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type failed: unknown result"); } -void ThriftHiveMetastoreClient::drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) +bool ThriftHiveMetastoreClient::create_type(const Type& type) { - send_drop_table_with_environment_context(dbname, name, deleteData, environment_context); - recv_drop_table_with_environment_context(); + send_create_type(type); + return recv_create_type(); } -void ThriftHiveMetastoreClient::send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::send_create_type(const Type& type) { int32_t cseqid = 0; - oprot_->writeMessageBegin("drop_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_type", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_table_with_environment_context_pargs args; - args.dbname = &dbname; - args.name = &name; - args.deleteData = &deleteData; - args.environment_context = &environment_context; + ThriftHiveMetastore_create_type_pargs args; + args.type = &type; args.write(oprot_); oprot_->writeMessageEnd(); @@ -44165,7 +44435,7 @@ void ThriftHiveMetastoreClient::send_drop_table_with_environment_context(const s oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_drop_table_with_environment_context() +bool ThriftHiveMetastoreClient::recv_create_type() { int32_t rseqid = 0; @@ -44185,40 +44455,46 @@ void ThriftHiveMetastoreClient::recv_drop_table_with_environment_context() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_table_with_environment_context") != 0) { + if (fname.compare("create_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_drop_table_with_environment_context_presult result; + bool _return; + ThriftHiveMetastore_create_type_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + return _return; + } if (result.__isset.o1) { throw result.o1; } + if (result.__isset.o2) { + throw result.o2; + } if (result.__isset.o3) { throw result.o3; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_type failed: unknown result"); } -void ThriftHiveMetastoreClient::truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames) +bool ThriftHiveMetastoreClient::drop_type(const std::string& type) { - send_truncate_table(dbName, tableName, partNames); - recv_truncate_table(); + 
send_drop_type(type); + return recv_drop_type(); } -void ThriftHiveMetastoreClient::send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames) +void ThriftHiveMetastoreClient::send_drop_type(const std::string& type) { int32_t cseqid = 0; - oprot_->writeMessageBegin("truncate_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_type", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_truncate_table_pargs args; - args.dbName = &dbName; - args.tableName = &tableName; - args.partNames = &partNames; + ThriftHiveMetastore_drop_type_pargs args; + args.type = &type; args.write(oprot_); oprot_->writeMessageEnd(); @@ -44226,7 +44502,7 @@ void ThriftHiveMetastoreClient::send_truncate_table(const std::string& dbName, c oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_truncate_table() +bool ThriftHiveMetastoreClient::recv_drop_type() { int32_t rseqid = 0; @@ -44246,36 +44522,43 @@ void ThriftHiveMetastoreClient::recv_truncate_table() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("truncate_table") != 0) { + if (fname.compare("drop_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_truncate_table_presult result; + bool _return; + ThriftHiveMetastore_drop_type_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + return _return; + } if (result.__isset.o1) { throw result.o1; } - return; + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_type failed: unknown result"); } -void ThriftHiveMetastoreClient::get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern) +void ThriftHiveMetastoreClient::get_type_all(std::map & _return, const std::string& name) { - send_get_tables(db_name, pattern); - recv_get_tables(_return); + send_get_type_all(name); + recv_get_type_all(_return); } -void ThriftHiveMetastoreClient::send_get_tables(const std::string& db_name, const std::string& pattern) +void ThriftHiveMetastoreClient::send_get_type_all(const std::string& name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_tables", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_type_all", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_tables_pargs args; - args.db_name = &db_name; - args.pattern = &pattern; + ThriftHiveMetastore_get_type_all_pargs args; + args.name = &name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -44283,7 +44566,7 @@ void ThriftHiveMetastoreClient::send_get_tables(const std::string& db_name, cons oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_tables(std::vector & _return) +void ThriftHiveMetastoreClient::recv_get_type_all(std::map & _return) { int32_t rseqid = 0; @@ -44303,12 +44586,12 @@ void ThriftHiveMetastoreClient::recv_get_tables(std::vector & _retu iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_tables") != 0) { + if (fname.compare("get_type_all") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_tables_presult result; + ThriftHiveMetastore_get_type_all_presult result; 
result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -44318,27 +44601,26 @@ void ThriftHiveMetastoreClient::recv_get_tables(std::vector & _retu // _return pointer has now been filled return; } - if (result.__isset.o1) { - throw result.o1; + if (result.__isset.o2) { + throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_tables failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type_all failed: unknown result"); } -void ThriftHiveMetastoreClient::get_tables_by_type(std::vector & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType) +void ThriftHiveMetastoreClient::get_fields(std::vector & _return, const std::string& db_name, const std::string& table_name) { - send_get_tables_by_type(db_name, pattern, tableType); - recv_get_tables_by_type(_return); + send_get_fields(db_name, table_name); + recv_get_fields(_return); } -void ThriftHiveMetastoreClient::send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType) +void ThriftHiveMetastoreClient::send_get_fields(const std::string& db_name, const std::string& table_name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_tables_by_type", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_fields", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_tables_by_type_pargs args; + ThriftHiveMetastore_get_fields_pargs args; args.db_name = &db_name; - args.pattern = &pattern; - args.tableType = &tableType; + args.table_name = &table_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -44346,7 +44628,7 @@ void ThriftHiveMetastoreClient::send_get_tables_by_type(const std::string& db_na oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_tables_by_type(std::vector & _return) +void ThriftHiveMetastoreClient::recv_get_fields(std::vector & _return) { int32_t rseqid = 0; @@ -44366,12 +44648,12 @@ void ThriftHiveMetastoreClient::recv_get_tables_by_type(std::vector iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_tables_by_type") != 0) { + if (fname.compare("get_fields") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_tables_by_type_presult result; + ThriftHiveMetastore_get_fields_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -44384,24 +44666,30 @@ void ThriftHiveMetastoreClient::recv_get_tables_by_type(std::vector if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_tables_by_type failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields failed: unknown result"); } -void ThriftHiveMetastoreClient::get_table_meta(std::vector & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types) +void ThriftHiveMetastoreClient::get_fields_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) { - 
send_get_table_meta(db_patterns, tbl_patterns, tbl_types); - recv_get_table_meta(_return); + send_get_fields_with_environment_context(db_name, table_name, environment_context); + recv_get_fields_with_environment_context(_return); } -void ThriftHiveMetastoreClient::send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types) +void ThriftHiveMetastoreClient::send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_table_meta", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_fields_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_meta_pargs args; - args.db_patterns = &db_patterns; - args.tbl_patterns = &tbl_patterns; - args.tbl_types = &tbl_types; + ThriftHiveMetastore_get_fields_with_environment_context_pargs args; + args.db_name = &db_name; + args.table_name = &table_name; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -44409,7 +44697,7 @@ void ThriftHiveMetastoreClient::send_get_table_meta(const std::string& db_patter oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_table_meta(std::vector & _return) +void ThriftHiveMetastoreClient::recv_get_fields_with_environment_context(std::vector & _return) { int32_t rseqid = 0; @@ -44429,12 +44717,12 @@ void ThriftHiveMetastoreClient::recv_get_table_meta(std::vector & _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_meta") != 0) { + if (fname.compare("get_fields_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_table_meta_presult result; + ThriftHiveMetastore_get_fields_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -44447,22 +44735,29 @@ void ThriftHiveMetastoreClient::recv_get_table_meta(std::vector & _re if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_meta failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields_with_environment_context failed: unknown result"); } -void ThriftHiveMetastoreClient::get_all_tables(std::vector & _return, const std::string& db_name) +void ThriftHiveMetastoreClient::get_schema(std::vector & _return, const std::string& db_name, const std::string& table_name) { - send_get_all_tables(db_name); - recv_get_all_tables(_return); + send_get_schema(db_name, table_name); + recv_get_schema(_return); } -void ThriftHiveMetastoreClient::send_get_all_tables(const std::string& db_name) +void ThriftHiveMetastoreClient::send_get_schema(const std::string& db_name, const std::string& table_name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_all_tables", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_schema", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_tables_pargs args; + ThriftHiveMetastore_get_schema_pargs args; args.db_name = &db_name; + args.table_name = &table_name; 
args.write(oprot_); oprot_->writeMessageEnd(); @@ -44470,7 +44765,7 @@ void ThriftHiveMetastoreClient::send_get_all_tables(const std::string& db_name) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_all_tables(std::vector & _return) +void ThriftHiveMetastoreClient::recv_get_schema(std::vector & _return) { int32_t rseqid = 0; @@ -44490,12 +44785,12 @@ void ThriftHiveMetastoreClient::recv_get_all_tables(std::vector & _ iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_tables") != 0) { + if (fname.compare("get_schema") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_all_tables_presult result; + ThriftHiveMetastore_get_schema_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -44508,23 +44803,30 @@ void ThriftHiveMetastoreClient::recv_get_all_tables(std::vector & _ if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_tables failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema failed: unknown result"); } -void ThriftHiveMetastoreClient::get_table(Table& _return, const std::string& dbname, const std::string& tbl_name) +void ThriftHiveMetastoreClient::get_schema_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) { - send_get_table(dbname, tbl_name); - recv_get_table(_return); + send_get_schema_with_environment_context(db_name, table_name, environment_context); + recv_get_schema_with_environment_context(_return); } -void ThriftHiveMetastoreClient::send_get_table(const std::string& dbname, const std::string& tbl_name) +void ThriftHiveMetastoreClient::send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_schema_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_pargs args; - args.dbname = &dbname; - args.tbl_name = &tbl_name; + ThriftHiveMetastore_get_schema_with_environment_context_pargs args; + args.db_name = &db_name; + args.table_name = &table_name; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -44532,7 +44834,7 @@ void ThriftHiveMetastoreClient::send_get_table(const std::string& dbname, const oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_table(Table& _return) +void ThriftHiveMetastoreClient::recv_get_schema_with_environment_context(std::vector & _return) { int32_t rseqid = 0; @@ -44552,12 +44854,12 @@ void ThriftHiveMetastoreClient::recv_get_table(Table& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table") != 0) { + if (fname.compare("get_schema_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_table_presult result; + 
ThriftHiveMetastore_get_schema_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -44573,23 +44875,25 @@ void ThriftHiveMetastoreClient::recv_get_table(Table& _return) if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table failed: unknown result"); + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema_with_environment_context failed: unknown result"); } -void ThriftHiveMetastoreClient::get_table_objects_by_name(std::vector<Table>
& _return, const std::string& dbname, const std::vector & tbl_names) +void ThriftHiveMetastoreClient::create_table(const Table& tbl) { - send_get_table_objects_by_name(dbname, tbl_names); - recv_get_table_objects_by_name(_return); + send_create_table(tbl); + recv_create_table(); } -void ThriftHiveMetastoreClient::send_get_table_objects_by_name(const std::string& dbname, const std::vector & tbl_names) +void ThriftHiveMetastoreClient::send_create_table(const Table& tbl) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_table_objects_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_objects_by_name_pargs args; - args.dbname = &dbname; - args.tbl_names = &tbl_names; + ThriftHiveMetastore_create_table_pargs args; + args.tbl = &tbl; args.write(oprot_); oprot_->writeMessageEnd(); @@ -44597,7 +44901,7 @@ void ThriftHiveMetastoreClient::send_get_table_objects_by_name(const std::string oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_table_objects_by_name(std::vector
<Table> & _return) +void ThriftHiveMetastoreClient::recv_create_table() { int32_t rseqid = 0; @@ -44617,37 +44921,45 @@ void ThriftHiveMetastoreClient::recv_get_table_objects_by_name(std::vector<Table> iprot_->
readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_objects_by_name") != 0) { + if (fname.compare("create_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_table_objects_by_name_presult result; - result.success = &_return; + ThriftHiveMetastore_create_table_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; + if (result.__isset.o1) { + throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_objects_by_name failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + return; } -void ThriftHiveMetastoreClient::get_table_req(GetTableResult& _return, const GetTableRequest& req) +void ThriftHiveMetastoreClient::create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context) { - send_get_table_req(req); - recv_get_table_req(_return); + send_create_table_with_environment_context(tbl, environment_context); + recv_create_table_with_environment_context(); } -void ThriftHiveMetastoreClient::send_get_table_req(const GetTableRequest& req) +void ThriftHiveMetastoreClient::send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_table_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_req_pargs args; - args.req = &req; + ThriftHiveMetastore_create_table_with_environment_context_pargs args; + args.tbl = &tbl; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -44655,7 +44967,7 @@ void ThriftHiveMetastoreClient::send_get_table_req(const GetTableRequest& req) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_table_req(GetTableResult& _return) +void ThriftHiveMetastoreClient::recv_create_table_with_environment_context() { int32_t rseqid = 0; @@ -44675,43 +44987,48 @@ void ThriftHiveMetastoreClient::recv_get_table_req(GetTableResult& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_req") != 0) { + if (fname.compare("create_table_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_table_req_presult result; - result.success = &_return; + ThriftHiveMetastore_create_table_with_environment_context_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_req failed: unknown result"); + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + return; } -void ThriftHiveMetastoreClient::get_table_objects_by_name_req(GetTablesResult& _return, const 
GetTablesRequest& req) +void ThriftHiveMetastoreClient::create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints) { - send_get_table_objects_by_name_req(req); - recv_get_table_objects_by_name_req(_return); + send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints); + recv_create_table_with_constraints(); } -void ThriftHiveMetastoreClient::send_get_table_objects_by_name_req(const GetTablesRequest& req) +void ThriftHiveMetastoreClient::send_create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_table_objects_by_name_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_table_with_constraints", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_objects_by_name_req_pargs args; - args.req = &req; + ThriftHiveMetastore_create_table_with_constraints_pargs args; + args.tbl = &tbl; + args.primaryKeys = &primaryKeys; + args.foreignKeys = &foreignKeys; + args.uniqueConstraints = &uniqueConstraints; + args.notNullConstraints = &notNullConstraints; args.write(oprot_); oprot_->writeMessageEnd(); @@ -44719,7 +45036,7 @@ void ThriftHiveMetastoreClient::send_get_table_objects_by_name_req(const GetTabl oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_table_objects_by_name_req(GetTablesResult& _return) +void ThriftHiveMetastoreClient::recv_create_table_with_constraints() { int32_t rseqid = 0; @@ -44739,21 +45056,16 @@ void ThriftHiveMetastoreClient::recv_get_table_objects_by_name_req(GetTablesResu iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_objects_by_name_req") != 0) { + if (fname.compare("create_table_with_constraints") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_table_objects_by_name_req_presult result; - result.success = &_return; + ThriftHiveMetastore_create_table_with_constraints_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } @@ -44763,24 +45075,25 @@ void ThriftHiveMetastoreClient::recv_get_table_objects_by_name_req(GetTablesResu if (result.__isset.o3) { throw result.o3; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_objects_by_name_req failed: unknown result"); + if (result.__isset.o4) { + throw result.o4; + } + return; } -void ThriftHiveMetastoreClient::get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) +void ThriftHiveMetastoreClient::drop_constraint(const DropConstraintRequest& req) { - send_get_table_names_by_filter(dbname, filter, max_tables); - recv_get_table_names_by_filter(_return); + send_drop_constraint(req); + recv_drop_constraint(); } -void ThriftHiveMetastoreClient::send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables) +void ThriftHiveMetastoreClient::send_drop_constraint(const DropConstraintRequest&
req) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_table_names_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_constraint", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_names_by_filter_pargs args; - args.dbname = &dbname; - args.filter = &filter; - args.max_tables = &max_tables; + ThriftHiveMetastore_drop_constraint_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -44788,7 +45101,7 @@ void ThriftHiveMetastoreClient::send_get_table_names_by_filter(const std::string oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_table_names_by_filter(std::vector & _return) +void ThriftHiveMetastoreClient::recv_drop_constraint() { int32_t rseqid = 0; @@ -44808,56 +45121,46 @@ void ThriftHiveMetastoreClient::recv_get_table_names_by_filter(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_names_by_filter") != 0) { + if (fname.compare("drop_constraint") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_table_names_by_filter_presult result; - result.success = &_return; + ThriftHiveMetastore_drop_constraint_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } if (result.__isset.o3) { throw result.o3; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_names_by_filter failed: unknown result"); + return; } -void ThriftHiveMetastoreClient::alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) +void ThriftHiveMetastoreClient::add_primary_key(const AddPrimaryKeyRequest& req) { - send_alter_table(dbname, tbl_name, new_tbl); - recv_alter_table(); + send_add_primary_key(req); + recv_add_primary_key(); } -void ThriftHiveMetastoreClient::send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) +void ThriftHiveMetastoreClient::send_add_primary_key(const AddPrimaryKeyRequest& req) { int32_t cseqid = 0; - oprot_->writeMessageBegin("alter_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_primary_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_table_pargs args; - args.dbname = &dbname; - args.tbl_name = &tbl_name; - args.new_tbl = &new_tbl; - args.write(oprot_); + ThriftHiveMetastore_add_primary_key_pargs args; + args.req = &req; + args.write(oprot_); oprot_->writeMessageEnd(); oprot_->getTransport()->writeEnd(); oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_alter_table() +void ThriftHiveMetastoreClient::recv_add_primary_key() { int32_t rseqid = 0; @@ -44877,12 +45180,12 @@ void ThriftHiveMetastoreClient::recv_alter_table() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_table") != 0) { + if (fname.compare("add_primary_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_alter_table_presult result; + ThriftHiveMetastore_add_primary_key_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -44896,22 +45199,19 
@@ void ThriftHiveMetastoreClient::recv_alter_table() return; } -void ThriftHiveMetastoreClient::alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::add_foreign_key(const AddForeignKeyRequest& req) { - send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context); - recv_alter_table_with_environment_context(); + send_add_foreign_key(req); + recv_add_foreign_key(); } -void ThriftHiveMetastoreClient::send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::send_add_foreign_key(const AddForeignKeyRequest& req) { int32_t cseqid = 0; - oprot_->writeMessageBegin("alter_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_foreign_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_table_with_environment_context_pargs args; - args.dbname = &dbname; - args.tbl_name = &tbl_name; - args.new_tbl = &new_tbl; - args.environment_context = &environment_context; + ThriftHiveMetastore_add_foreign_key_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -44919,7 +45219,7 @@ void ThriftHiveMetastoreClient::send_alter_table_with_environment_context(const oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_alter_table_with_environment_context() +void ThriftHiveMetastoreClient::recv_add_foreign_key() { int32_t rseqid = 0; @@ -44939,12 +45239,12 @@ void ThriftHiveMetastoreClient::recv_alter_table_with_environment_context() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_table_with_environment_context") != 0) { + if (fname.compare("add_foreign_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_alter_table_with_environment_context_presult result; + ThriftHiveMetastore_add_foreign_key_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -44958,22 +45258,19 @@ void ThriftHiveMetastoreClient::recv_alter_table_with_environment_context() return; } -void ThriftHiveMetastoreClient::alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) +void ThriftHiveMetastoreClient::add_unique_constraint(const AddUniqueConstraintRequest& req) { - send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade); - recv_alter_table_with_cascade(); + send_add_unique_constraint(req); + recv_add_unique_constraint(); } -void ThriftHiveMetastoreClient::send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) +void ThriftHiveMetastoreClient::send_add_unique_constraint(const AddUniqueConstraintRequest& req) { int32_t cseqid = 0; - oprot_->writeMessageBegin("alter_table_with_cascade", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_unique_constraint", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_table_with_cascade_pargs args; - args.dbname = &dbname; - args.tbl_name = &tbl_name; - args.new_tbl = &new_tbl; - args.cascade = &cascade; + ThriftHiveMetastore_add_unique_constraint_pargs args; + args.req = &req; args.write(oprot_); 
oprot_->writeMessageEnd(); @@ -44981,7 +45278,7 @@ void ThriftHiveMetastoreClient::send_alter_table_with_cascade(const std::string& oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_alter_table_with_cascade() +void ThriftHiveMetastoreClient::recv_add_unique_constraint() { int32_t rseqid = 0; @@ -45001,12 +45298,12 @@ void ThriftHiveMetastoreClient::recv_alter_table_with_cascade() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_table_with_cascade") != 0) { + if (fname.compare("add_unique_constraint") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_alter_table_with_cascade_presult result; + ThriftHiveMetastore_add_unique_constraint_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -45020,19 +45317,19 @@ void ThriftHiveMetastoreClient::recv_alter_table_with_cascade() return; } -void ThriftHiveMetastoreClient::add_partition(Partition& _return, const Partition& new_part) +void ThriftHiveMetastoreClient::add_not_null_constraint(const AddNotNullConstraintRequest& req) { - send_add_partition(new_part); - recv_add_partition(_return); + send_add_not_null_constraint(req); + recv_add_not_null_constraint(); } -void ThriftHiveMetastoreClient::send_add_partition(const Partition& new_part) +void ThriftHiveMetastoreClient::send_add_not_null_constraint(const AddNotNullConstraintRequest& req) { int32_t cseqid = 0; - oprot_->writeMessageBegin("add_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_not_null_constraint", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partition_pargs args; - args.new_part = &new_part; + ThriftHiveMetastore_add_not_null_constraint_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -45040,7 +45337,7 @@ void ThriftHiveMetastoreClient::send_add_partition(const Partition& new_part) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_add_partition(Partition& _return) +void ThriftHiveMetastoreClient::recv_add_not_null_constraint() { int32_t rseqid = 0; @@ -45060,47 +45357,40 @@ void ThriftHiveMetastoreClient::recv_add_partition(Partition& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partition") != 0) { + if (fname.compare("add_not_null_constraint") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_add_partition_presult result; - result.success = &_return; + ThriftHiveMetastore_add_not_null_constraint_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - if (result.__isset.o3) { - throw result.o3; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partition failed: unknown result"); + return; } -void ThriftHiveMetastoreClient::add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::drop_table(const std::string& dbname, const std::string& name, const bool deleteData) { - send_add_partition_with_environment_context(new_part, 
environment_context); - recv_add_partition_with_environment_context(_return); + send_drop_table(dbname, name, deleteData); + recv_drop_table(); } -void ThriftHiveMetastoreClient::send_add_partition_with_environment_context(const Partition& new_part, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData) { int32_t cseqid = 0; - oprot_->writeMessageBegin("add_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partition_with_environment_context_pargs args; - args.new_part = &new_part; - args.environment_context = &environment_context; + ThriftHiveMetastore_drop_table_pargs args; + args.dbname = &dbname; + args.name = &name; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -45108,7 +45398,7 @@ void ThriftHiveMetastoreClient::send_add_partition_with_environment_context(cons oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_add_partition_with_environment_context(Partition& _return) +void ThriftHiveMetastoreClient::recv_drop_table() { int32_t rseqid = 0; @@ -45128,46 +45418,41 @@ void ThriftHiveMetastoreClient::recv_add_partition_with_environment_context(Part iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partition_with_environment_context") != 0) { + if (fname.compare("drop_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_add_partition_with_environment_context_presult result; - result.success = &_return; + ThriftHiveMetastore_drop_table_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } if (result.__isset.o3) { throw result.o3; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partition_with_environment_context failed: unknown result"); + return; } -int32_t ThriftHiveMetastoreClient::add_partitions(const std::vector & new_parts) +void ThriftHiveMetastoreClient::drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) { - send_add_partitions(new_parts); - return recv_add_partitions(); + send_drop_table_with_environment_context(dbname, name, deleteData, environment_context); + recv_drop_table_with_environment_context(); } -void ThriftHiveMetastoreClient::send_add_partitions(const std::vector & new_parts) +void ThriftHiveMetastoreClient::send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) { int32_t cseqid = 0; - oprot_->writeMessageBegin("add_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partitions_pargs args; - args.new_parts = &new_parts; + ThriftHiveMetastore_drop_table_with_environment_context_pargs args; + args.dbname = &dbname; + args.name = &name; + args.deleteData = &deleteData; + 
args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -45175,7 +45460,7 @@ void ThriftHiveMetastoreClient::send_add_partitions(const std::vector oprot_->getTransport()->flush(); } -int32_t ThriftHiveMetastoreClient::recv_add_partitions() +void ThriftHiveMetastoreClient::recv_drop_table_with_environment_context() { int32_t rseqid = 0; @@ -45195,46 +45480,40 @@ int32_t ThriftHiveMetastoreClient::recv_add_partitions() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partitions") != 0) { + if (fname.compare("drop_table_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - int32_t _return; - ThriftHiveMetastore_add_partitions_presult result; - result.success = &_return; + ThriftHiveMetastore_drop_table_with_environment_context_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - return _return; - } if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } if (result.__isset.o3) { throw result.o3; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions failed: unknown result"); + return; } -int32_t ThriftHiveMetastoreClient::add_partitions_pspec(const std::vector & new_parts) +void ThriftHiveMetastoreClient::truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames) { - send_add_partitions_pspec(new_parts); - return recv_add_partitions_pspec(); + send_truncate_table(dbName, tableName, partNames); + recv_truncate_table(); } -void ThriftHiveMetastoreClient::send_add_partitions_pspec(const std::vector & new_parts) +void ThriftHiveMetastoreClient::send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames) { int32_t cseqid = 0; - oprot_->writeMessageBegin("add_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("truncate_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partitions_pspec_pargs args; - args.new_parts = &new_parts; + ThriftHiveMetastore_truncate_table_pargs args; + args.dbName = &dbName; + args.tableName = &tableName; + args.partNames = &partNames; args.write(oprot_); oprot_->writeMessageEnd(); @@ -45242,7 +45521,7 @@ void ThriftHiveMetastoreClient::send_add_partitions_pspec(const std::vectorgetTransport()->flush(); } -int32_t ThriftHiveMetastoreClient::recv_add_partitions_pspec() +void ThriftHiveMetastoreClient::recv_truncate_table() { int32_t rseqid = 0; @@ -45262,48 +45541,36 @@ int32_t ThriftHiveMetastoreClient::recv_add_partitions_pspec() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partitions_pspec") != 0) { + if (fname.compare("truncate_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - int32_t _return; - ThriftHiveMetastore_add_partitions_pspec_presult result; - result.success = &_return; + ThriftHiveMetastore_truncate_table_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - return _return; - } if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions_pspec failed: unknown result"); + return; } -void ThriftHiveMetastoreClient::append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) +void ThriftHiveMetastoreClient::get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern) { - send_append_partition(db_name, tbl_name, part_vals); - recv_append_partition(_return); + send_get_tables(db_name, pattern); + recv_get_tables(_return); } -void ThriftHiveMetastoreClient::send_append_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) +void ThriftHiveMetastoreClient::send_get_tables(const std::string& db_name, const std::string& pattern) { int32_t cseqid = 0; - oprot_->writeMessageBegin("append_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_tables", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_pargs args; + ThriftHiveMetastore_get_tables_pargs args; args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; + args.pattern = &pattern; args.write(oprot_); oprot_->writeMessageEnd(); @@ -45311,7 +45578,7 @@ void ThriftHiveMetastoreClient::send_append_partition(const std::string& db_name oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_append_partition(Partition& _return) +void ThriftHiveMetastoreClient::recv_get_tables(std::vector & _return) { int32_t rseqid = 0; @@ -45331,12 +45598,12 @@ void ThriftHiveMetastoreClient::recv_append_partition(Partition& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition") != 0) { + if (fname.compare("get_tables") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_append_partition_presult result; + ThriftHiveMetastore_get_tables_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -45349,28 +45616,24 @@ void ThriftHiveMetastoreClient::recv_append_partition(Partition& _return) if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_tables failed: unknown result"); } -void ThriftHiveMetastoreClient::add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request) +void ThriftHiveMetastoreClient::get_tables_by_type(std::vector & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType) { - send_add_partitions_req(request); - recv_add_partitions_req(_return); + send_get_tables_by_type(db_name, pattern, tableType); + recv_get_tables_by_type(_return); } -void ThriftHiveMetastoreClient::send_add_partitions_req(const AddPartitionsRequest& request) +void ThriftHiveMetastoreClient::send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType) { int32_t cseqid = 0; - oprot_->writeMessageBegin("add_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_tables_by_type", 
::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partitions_req_pargs args; - args.request = &request; + ThriftHiveMetastore_get_tables_by_type_pargs args; + args.db_name = &db_name; + args.pattern = &pattern; + args.tableType = &tableType; args.write(oprot_); oprot_->writeMessageEnd(); @@ -45378,7 +45641,7 @@ void ThriftHiveMetastoreClient::send_add_partitions_req(const AddPartitionsReque oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_add_partitions_req(AddPartitionsResult& _return) +void ThriftHiveMetastoreClient::recv_get_tables_by_type(std::vector & _return) { int32_t rseqid = 0; @@ -45398,12 +45661,12 @@ void ThriftHiveMetastoreClient::recv_add_partitions_req(AddPartitionsResult& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partitions_req") != 0) { + if (fname.compare("get_tables_by_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_add_partitions_req_presult result; + ThriftHiveMetastore_get_tables_by_type_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -45416,31 +45679,24 @@ void ThriftHiveMetastoreClient::recv_add_partitions_req(AddPartitionsResult& _re if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions_req failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_tables_by_type failed: unknown result"); } -void ThriftHiveMetastoreClient::append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::get_table_meta(std::vector & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types) { - send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context); - recv_append_partition_with_environment_context(_return); + send_get_table_meta(db_patterns, tbl_patterns, tbl_types); + recv_get_table_meta(_return); } -void ThriftHiveMetastoreClient::send_append_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types) { int32_t cseqid = 0; - oprot_->writeMessageBegin("append_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_meta", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_with_environment_context_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.environment_context = &environment_context; + ThriftHiveMetastore_get_table_meta_pargs args; + args.db_patterns = &db_patterns; + args.tbl_patterns = &tbl_patterns; + args.tbl_types = &tbl_types; args.write(oprot_); oprot_->writeMessageEnd(); @@ -45448,7 +45704,7 @@ void ThriftHiveMetastoreClient::send_append_partition_with_environment_context(c 
oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_append_partition_with_environment_context(Partition& _return) +void ThriftHiveMetastoreClient::recv_get_table_meta(std::vector & _return) { int32_t rseqid = 0; @@ -45468,12 +45724,12 @@ void ThriftHiveMetastoreClient::recv_append_partition_with_environment_context(P iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition_with_environment_context") != 0) { + if (fname.compare("get_table_meta") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_append_partition_with_environment_context_presult result; + ThriftHiveMetastore_get_table_meta_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -45486,30 +45742,22 @@ void ThriftHiveMetastoreClient::recv_append_partition_with_environment_context(P if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_with_environment_context failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_meta failed: unknown result"); } -void ThriftHiveMetastoreClient::append_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) +void ThriftHiveMetastoreClient::get_all_tables(std::vector & _return, const std::string& db_name) { - send_append_partition_by_name(db_name, tbl_name, part_name); - recv_append_partition_by_name(_return); + send_get_all_tables(db_name); + recv_get_all_tables(_return); } -void ThriftHiveMetastoreClient::send_append_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name) +void ThriftHiveMetastoreClient::send_get_all_tables(const std::string& db_name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("append_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_tables", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_by_name_pargs args; + ThriftHiveMetastore_get_all_tables_pargs args; args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_name = &part_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -45517,7 +45765,7 @@ void ThriftHiveMetastoreClient::send_append_partition_by_name(const std::string& oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_append_partition_by_name(Partition& _return) +void ThriftHiveMetastoreClient::recv_get_all_tables(std::vector & _return) { int32_t rseqid = 0; @@ -45537,12 +45785,12 @@ void ThriftHiveMetastoreClient::recv_append_partition_by_name(Partition& _return iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition_by_name") != 0) { + if (fname.compare("get_all_tables") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_append_partition_by_name_presult result; + ThriftHiveMetastore_get_all_tables_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -45555,31 +45803,23 @@ void ThriftHiveMetastoreClient::recv_append_partition_by_name(Partition& _return if 
(result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_tables failed: unknown result"); } -void ThriftHiveMetastoreClient::append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::get_table(Table& _return, const std::string& dbname, const std::string& tbl_name) { - send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context); - recv_append_partition_by_name_with_environment_context(_return); + send_get_table(dbname, tbl_name); + recv_get_table(_return); } -void ThriftHiveMetastoreClient::send_append_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::send_get_table(const std::string& dbname, const std::string& tbl_name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("append_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_by_name_with_environment_context_pargs args; - args.db_name = &db_name; + ThriftHiveMetastore_get_table_pargs args; + args.dbname = &dbname; args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -45587,7 +45827,7 @@ void ThriftHiveMetastoreClient::send_append_partition_by_name_with_environment_c oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_append_partition_by_name_with_environment_context(Partition& _return) +void ThriftHiveMetastoreClient::recv_get_table(Table& _return) { int32_t rseqid = 0; @@ -45607,12 +45847,12 @@ void ThriftHiveMetastoreClient::recv_append_partition_by_name_with_environment_c iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition_by_name_with_environment_context") != 0) { + if (fname.compare("get_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_append_partition_by_name_with_environment_context_presult result; + ThriftHiveMetastore_get_table_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -45628,28 +45868,23 @@ void ThriftHiveMetastoreClient::recv_append_partition_by_name_with_environment_c if (result.__isset.o2) { throw result.o2; } - if (result.__isset.o3) { - throw result.o3; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name_with_environment_context failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table failed: unknown result"); } -bool ThriftHiveMetastoreClient::drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, 
const bool deleteData) +void ThriftHiveMetastoreClient::get_table_objects_by_name(std::vector<Table>
& _return, const std::string& dbname, const std::vector & tbl_names) { - send_drop_partition(db_name, tbl_name, part_vals, deleteData); - return recv_drop_partition(); + send_get_table_objects_by_name(dbname, tbl_names); + recv_get_table_objects_by_name(_return); } -void ThriftHiveMetastoreClient::send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) +void ThriftHiveMetastoreClient::send_get_table_objects_by_name(const std::string& dbname, const std::vector & tbl_names) { int32_t cseqid = 0; - oprot_->writeMessageBegin("drop_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_objects_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.deleteData = &deleteData; + ThriftHiveMetastore_get_table_objects_by_name_pargs args; + args.dbname = &dbname; + args.tbl_names = &tbl_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -45657,7 +45892,7 @@ void ThriftHiveMetastoreClient::send_drop_partition(const std::string& db_name, oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_drop_partition() +void ThriftHiveMetastoreClient::recv_get_table_objects_by_name(std::vector
& _return) { int32_t rseqid = 0; @@ -45677,47 +45912,37 @@ bool ThriftHiveMetastoreClient::recv_drop_partition() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partition") != 0) { + if (fname.compare("get_table_objects_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_drop_partition_presult result; + ThriftHiveMetastore_get_table_objects_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - return _return; - } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; + // _return pointer has now been filled + return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_objects_by_name failed: unknown result"); } -bool ThriftHiveMetastoreClient::drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::get_table_req(GetTableResult& _return, const GetTableRequest& req) { - send_drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, environment_context); - return recv_drop_partition_with_environment_context(); + send_get_table_req(req); + recv_get_table_req(_return); } -void ThriftHiveMetastoreClient::send_drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::send_get_table_req(const GetTableRequest& req) { int32_t cseqid = 0; - oprot_->writeMessageBegin("drop_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_with_environment_context_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.deleteData = &deleteData; - args.environment_context = &environment_context; + ThriftHiveMetastore_get_table_req_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -45725,7 +45950,7 @@ void ThriftHiveMetastoreClient::send_drop_partition_with_environment_context(con oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_drop_partition_with_environment_context() +void ThriftHiveMetastoreClient::recv_get_table_req(GetTableResult& _return) { int32_t rseqid = 0; @@ -45745,20 +45970,20 @@ bool ThriftHiveMetastoreClient::recv_drop_partition_with_environment_context() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partition_with_environment_context") != 0) { + if (fname.compare("get_table_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_drop_partition_with_environment_context_presult result; + ThriftHiveMetastore_get_table_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); 
iprot_->getTransport()->readEnd(); if (result.__isset.success) { - return _return; + // _return pointer has now been filled + return; } if (result.__isset.o1) { throw result.o1; @@ -45766,25 +45991,22 @@ bool ThriftHiveMetastoreClient::recv_drop_partition_with_environment_context() if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_with_environment_context failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_req failed: unknown result"); } -bool ThriftHiveMetastoreClient::drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) +void ThriftHiveMetastoreClient::get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req) { - send_drop_partition_by_name(db_name, tbl_name, part_name, deleteData); - return recv_drop_partition_by_name(); + send_get_table_objects_by_name_req(req); + recv_get_table_objects_by_name_req(_return); } -void ThriftHiveMetastoreClient::send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) +void ThriftHiveMetastoreClient::send_get_table_objects_by_name_req(const GetTablesRequest& req) { int32_t cseqid = 0; - oprot_->writeMessageBegin("drop_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_objects_by_name_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_by_name_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.deleteData = &deleteData; + ThriftHiveMetastore_get_table_objects_by_name_req_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -45792,7 +46014,7 @@ void ThriftHiveMetastoreClient::send_drop_partition_by_name(const std::string& d oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_drop_partition_by_name() +void ThriftHiveMetastoreClient::recv_get_table_objects_by_name_req(GetTablesResult& _return) { int32_t rseqid = 0; @@ -45812,20 +46034,20 @@ bool ThriftHiveMetastoreClient::recv_drop_partition_by_name() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partition_by_name") != 0) { + if (fname.compare("get_table_objects_by_name_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_drop_partition_by_name_presult result; + ThriftHiveMetastore_get_table_objects_by_name_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - return _return; + // _return pointer has now been filled + return; } if (result.__isset.o1) { throw result.o1; @@ -45833,26 +46055,27 @@ bool ThriftHiveMetastoreClient::recv_drop_partition_by_name() if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_by_name failed: unknown result"); + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_objects_by_name_req failed: unknown result"); } -bool 
ThriftHiveMetastoreClient::drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) { - send_drop_partition_by_name_with_environment_context(db_name, tbl_name, part_name, deleteData, environment_context); - return recv_drop_partition_by_name_with_environment_context(); + send_get_table_names_by_filter(dbname, filter, max_tables); + recv_get_table_names_by_filter(_return); } -void ThriftHiveMetastoreClient::send_drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables) { int32_t cseqid = 0; - oprot_->writeMessageBegin("drop_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_names_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_by_name_with_environment_context_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.deleteData = &deleteData; - args.environment_context = &environment_context; + ThriftHiveMetastore_get_table_names_by_filter_pargs args; + args.dbname = &dbname; + args.filter = &filter; + args.max_tables = &max_tables; args.write(oprot_); oprot_->writeMessageEnd(); @@ -45860,7 +46083,7 @@ void ThriftHiveMetastoreClient::send_drop_partition_by_name_with_environment_con oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_drop_partition_by_name_with_environment_context() +void ThriftHiveMetastoreClient::recv_get_table_names_by_filter(std::vector & _return) { int32_t rseqid = 0; @@ -45880,20 +46103,20 @@ bool ThriftHiveMetastoreClient::recv_drop_partition_by_name_with_environment_con iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partition_by_name_with_environment_context") != 0) { + if (fname.compare("get_table_names_by_filter") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_drop_partition_by_name_with_environment_context_presult result; + ThriftHiveMetastore_get_table_names_by_filter_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - return _return; + // _return pointer has now been filled + return; } if (result.__isset.o1) { throw result.o1; @@ -45901,22 +46124,27 @@ bool ThriftHiveMetastoreClient::recv_drop_partition_by_name_with_environment_con if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_by_name_with_environment_context failed: unknown result"); + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_names_by_filter failed: unknown result"); } -void 
ThriftHiveMetastoreClient::drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req) +void ThriftHiveMetastoreClient::alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) { - send_drop_partitions_req(req); - recv_drop_partitions_req(_return); + send_alter_table(dbname, tbl_name, new_tbl); + recv_alter_table(); } -void ThriftHiveMetastoreClient::send_drop_partitions_req(const DropPartitionsRequest& req) +void ThriftHiveMetastoreClient::send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) { int32_t cseqid = 0; - oprot_->writeMessageBegin("drop_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partitions_req_pargs args; - args.req = &req; + ThriftHiveMetastore_alter_table_pargs args; + args.dbname = &dbname; + args.tbl_name = &tbl_name; + args.new_tbl = &new_tbl; args.write(oprot_); oprot_->writeMessageEnd(); @@ -45924,7 +46152,7 @@ void ThriftHiveMetastoreClient::send_drop_partitions_req(const DropPartitionsReq oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_drop_partitions_req(DropPartitionsResult& _return) +void ThriftHiveMetastoreClient::recv_alter_table() { int32_t rseqid = 0; @@ -45944,45 +46172,41 @@ void ThriftHiveMetastoreClient::recv_drop_partitions_req(DropPartitionsResult& _ iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partitions_req") != 0) { + if (fname.compare("alter_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_drop_partitions_req_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_table_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partitions_req failed: unknown result"); + return; } -void ThriftHiveMetastoreClient::get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) +void ThriftHiveMetastoreClient::alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) { - send_get_partition(db_name, tbl_name, part_vals); - recv_get_partition(_return); + send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context); + recv_alter_table_with_environment_context(); } -void ThriftHiveMetastoreClient::send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) +void ThriftHiveMetastoreClient::send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_pargs args; - args.db_name = &db_name; + 
ThriftHiveMetastore_alter_table_with_environment_context_pargs args; + args.dbname = &dbname; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; + args.new_tbl = &new_tbl; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -45990,7 +46214,7 @@ void ThriftHiveMetastoreClient::send_get_partition(const std::string& db_name, c oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partition(Partition& _return) +void ThriftHiveMetastoreClient::recv_alter_table_with_environment_context() { int32_t rseqid = 0; @@ -46010,47 +46234,41 @@ void ThriftHiveMetastoreClient::recv_get_partition(Partition& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition") != 0) { + if (fname.compare("alter_table_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partition_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_table_with_environment_context_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition failed: unknown result"); + return; } -void ThriftHiveMetastoreClient::exchange_partition(Partition& _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) +void ThriftHiveMetastoreClient::alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) { - send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); - recv_exchange_partition(_return); + send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade); + recv_alter_table_with_cascade(); } -void ThriftHiveMetastoreClient::send_exchange_partition(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) +void ThriftHiveMetastoreClient::send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) { int32_t cseqid = 0; - oprot_->writeMessageBegin("exchange_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_table_with_cascade", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_exchange_partition_pargs args; - args.partitionSpecs = &partitionSpecs; - args.source_db = &source_db; - args.source_table_name = &source_table_name; - args.dest_db = &dest_db; - args.dest_table_name = &dest_table_name; + ThriftHiveMetastore_alter_table_with_cascade_pargs args; + args.dbname = &dbname; + args.tbl_name = &tbl_name; + args.new_tbl = &new_tbl; + args.cascade = &cascade; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46058,7 +46276,7 @@ void ThriftHiveMetastoreClient::send_exchange_partition(const std::mapgetTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_exchange_partition(Partition& _return) +void ThriftHiveMetastoreClient::recv_alter_table_with_cascade() { int32_t rseqid = 0; @@ -46078,53 +46296,38 @@ 
void ThriftHiveMetastoreClient::recv_exchange_partition(Partition& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("exchange_partition") != 0) { + if (fname.compare("alter_table_with_cascade") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_exchange_partition_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_table_with_cascade_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - if (result.__isset.o3) { - throw result.o3; - } - if (result.__isset.o4) { - throw result.o4; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "exchange_partition failed: unknown result"); + return; } -void ThriftHiveMetastoreClient::exchange_partitions(std::vector & _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) +void ThriftHiveMetastoreClient::add_partition(Partition& _return, const Partition& new_part) { - send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); - recv_exchange_partitions(_return); + send_add_partition(new_part); + recv_add_partition(_return); } -void ThriftHiveMetastoreClient::send_exchange_partitions(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) +void ThriftHiveMetastoreClient::send_add_partition(const Partition& new_part) { int32_t cseqid = 0; - oprot_->writeMessageBegin("exchange_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_exchange_partitions_pargs args; - args.partitionSpecs = &partitionSpecs; - args.source_db = &source_db; - args.source_table_name = &source_table_name; - args.dest_db = &dest_db; - args.dest_table_name = &dest_table_name; + ThriftHiveMetastore_add_partition_pargs args; + args.new_part = &new_part; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46132,7 +46335,7 @@ void ThriftHiveMetastoreClient::send_exchange_partitions(const std::mapgetTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_exchange_partitions(std::vector & _return) +void ThriftHiveMetastoreClient::recv_add_partition(Partition& _return) { int32_t rseqid = 0; @@ -46152,12 +46355,12 @@ void ThriftHiveMetastoreClient::recv_exchange_partitions(std::vector iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("exchange_partitions") != 0) { + if (fname.compare("add_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_exchange_partitions_presult result; + ThriftHiveMetastore_add_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -46176,29 +46379,23 @@ void ThriftHiveMetastoreClient::recv_exchange_partitions(std::vector if (result.__isset.o3) { throw result.o3; } - if (result.__isset.o4) { - throw result.o4; - } - throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "exchange_partitions failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partition failed: unknown result"); } -void ThriftHiveMetastoreClient::get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreClient::add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context) { - send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names); - recv_get_partition_with_auth(_return); + send_add_partition_with_environment_context(new_part, environment_context); + recv_add_partition_with_environment_context(_return); } -void ThriftHiveMetastoreClient::send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreClient::send_add_partition_with_environment_context(const Partition& new_part, const EnvironmentContext& environment_context) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_partition_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_with_auth_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.user_name = &user_name; - args.group_names = &group_names; + ThriftHiveMetastore_add_partition_with_environment_context_pargs args; + args.new_part = &new_part; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46206,7 +46403,7 @@ void ThriftHiveMetastoreClient::send_get_partition_with_auth(const std::string& oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partition_with_auth(Partition& _return) +void ThriftHiveMetastoreClient::recv_add_partition_with_environment_context(Partition& _return) { int32_t rseqid = 0; @@ -46226,12 +46423,12 @@ void ThriftHiveMetastoreClient::recv_get_partition_with_auth(Partition& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_with_auth") != 0) { + if (fname.compare("add_partition_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partition_with_auth_presult result; + ThriftHiveMetastore_add_partition_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -46247,24 +46444,25 @@ void ThriftHiveMetastoreClient::recv_get_partition_with_auth(Partition& _return) if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_with_auth failed: unknown result"); + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partition_with_environment_context failed: unknown result"); } -void ThriftHiveMetastoreClient::get_partition_by_name(Partition& _return, const std::string& 
db_name, const std::string& tbl_name, const std::string& part_name) +int32_t ThriftHiveMetastoreClient::add_partitions(const std::vector & new_parts) { - send_get_partition_by_name(db_name, tbl_name, part_name); - recv_get_partition_by_name(_return); + send_add_partitions(new_parts); + return recv_add_partitions(); } -void ThriftHiveMetastoreClient::send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name) +void ThriftHiveMetastoreClient::send_add_partitions(const std::vector & new_parts) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_by_name_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_name = &part_name; + ThriftHiveMetastore_add_partitions_pargs args; + args.new_parts = &new_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46272,7 +46470,7 @@ void ThriftHiveMetastoreClient::send_get_partition_by_name(const std::string& db oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partition_by_name(Partition& _return) +int32_t ThriftHiveMetastoreClient::recv_add_partitions() { int32_t rseqid = 0; @@ -46292,20 +46490,20 @@ void ThriftHiveMetastoreClient::recv_get_partition_by_name(Partition& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_by_name") != 0) { + if (fname.compare("add_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partition_by_name_presult result; + int32_t _return; + ThriftHiveMetastore_add_partitions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } if (result.__isset.o1) { throw result.o1; @@ -46313,24 +46511,25 @@ void ThriftHiveMetastoreClient::recv_get_partition_by_name(Partition& _return) if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_by_name failed: unknown result"); + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions failed: unknown result"); } -void ThriftHiveMetastoreClient::get_partitions(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) +int32_t ThriftHiveMetastoreClient::add_partitions_pspec(const std::vector & new_parts) { - send_get_partitions(db_name, tbl_name, max_parts); - recv_get_partitions(_return); + send_add_partitions_pspec(new_parts); + return recv_add_partitions_pspec(); } -void ThriftHiveMetastoreClient::send_get_partitions(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) +void ThriftHiveMetastoreClient::send_add_partitions_pspec(const std::vector & new_parts) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - 
args.max_parts = &max_parts; + ThriftHiveMetastore_add_partitions_pspec_pargs args; + args.new_parts = &new_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46338,7 +46537,7 @@ void ThriftHiveMetastoreClient::send_get_partitions(const std::string& db_name, oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partitions(std::vector & _return) +int32_t ThriftHiveMetastoreClient::recv_add_partitions_pspec() { int32_t rseqid = 0; @@ -46358,20 +46557,20 @@ void ThriftHiveMetastoreClient::recv_get_partitions(std::vector & _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions") != 0) { + if (fname.compare("add_partitions_pspec") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partitions_presult result; + int32_t _return; + ThriftHiveMetastore_add_partitions_pspec_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } if (result.__isset.o1) { throw result.o1; @@ -46379,26 +46578,27 @@ void ThriftHiveMetastoreClient::recv_get_partitions(std::vector & _re if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions failed: unknown result"); + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions_pspec failed: unknown result"); } -void ThriftHiveMetastoreClient::get_partitions_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreClient::append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) { - send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names); - recv_get_partitions_with_auth(_return); + send_append_partition(db_name, tbl_name, part_vals); + recv_append_partition(_return); } -void ThriftHiveMetastoreClient::send_get_partitions_with_auth(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreClient::send_append_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_partitions_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_with_auth_pargs args; + ThriftHiveMetastore_append_partition_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.max_parts = &max_parts; - args.user_name = &user_name; - args.group_names = &group_names; + args.part_vals = &part_vals; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46406,7 +46606,7 @@ void ThriftHiveMetastoreClient::send_get_partitions_with_auth(const std::string& oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partitions_with_auth(std::vector & _return) +void ThriftHiveMetastoreClient::recv_append_partition(Partition& _return) { int32_t rseqid = 0; @@ 
-46426,12 +46626,12 @@ void ThriftHiveMetastoreClient::recv_get_partitions_with_auth(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_with_auth") != 0) { + if (fname.compare("append_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partitions_with_auth_presult result; + ThriftHiveMetastore_append_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -46447,24 +46647,25 @@ void ThriftHiveMetastoreClient::recv_get_partitions_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) +void ThriftHiveMetastoreClient::add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request) { - send_get_partitions_pspec(db_name, tbl_name, max_parts); - recv_get_partitions_pspec(_return); + send_add_partitions_req(request); + recv_add_partitions_req(_return); } -void ThriftHiveMetastoreClient::send_get_partitions_pspec(const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) +void ThriftHiveMetastoreClient::send_add_partitions_req(const AddPartitionsRequest& request) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_pspec_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.max_parts = &max_parts; + ThriftHiveMetastore_add_partitions_req_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46472,7 +46673,7 @@ void ThriftHiveMetastoreClient::send_get_partitions_pspec(const std::string& db_ oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partitions_pspec(std::vector & _return) +void ThriftHiveMetastoreClient::recv_add_partitions_req(AddPartitionsResult& _return) { int32_t rseqid = 0; @@ -46492,12 +46693,12 @@ void ThriftHiveMetastoreClient::recv_get_partitions_pspec(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_pspec") != 0) { + if (fname.compare("add_partitions_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partitions_pspec_presult result; + ThriftHiveMetastore_add_partitions_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -46513,24 +46714,28 @@ void ThriftHiveMetastoreClient::recv_get_partitions_pspec(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) +void ThriftHiveMetastoreClient::append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context) { - send_get_partition_names(db_name, tbl_name, max_parts); - recv_get_partition_names(_return); + send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context); + recv_append_partition_with_environment_context(_return); } -void ThriftHiveMetastoreClient::send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) +void 
ThriftHiveMetastoreClient::send_append_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_partition_names", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_names_pargs args; + ThriftHiveMetastore_append_partition_with_environment_context_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.max_parts = &max_parts; + args.part_vals = &part_vals; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46538,7 +46743,7 @@ void ThriftHiveMetastoreClient::send_get_partition_names(const std::string& db_n oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partition_names(std::vector & _return) +void ThriftHiveMetastoreClient::recv_append_partition_with_environment_context(Partition& _return) { int32_t rseqid = 0; @@ -46558,12 +46763,12 @@ void ThriftHiveMetastoreClient::recv_get_partition_names(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_names") != 0) { + if (fname.compare("append_partition_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partition_names_presult result; + ThriftHiveMetastore_append_partition_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -46579,22 +46784,27 @@ void ThriftHiveMetastoreClient::recv_get_partition_names(std::vectorwriteMessageBegin("get_partition_values", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_values_pargs args; - args.request = &request; + ThriftHiveMetastore_append_partition_by_name_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_name = &part_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46602,7 +46812,7 @@ void ThriftHiveMetastoreClient::send_get_partition_values(const PartitionValuesR oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partition_values(PartitionValuesResponse& _return) +void ThriftHiveMetastoreClient::recv_append_partition_by_name(Partition& _return) { int32_t rseqid = 0; @@ -46622,12 +46832,12 @@ void ThriftHiveMetastoreClient::recv_get_partition_values(PartitionValuesRespons iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_values") != 0) { + if (fname.compare("append_partition_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partition_values_presult result; + ThriftHiveMetastore_append_partition_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -46643,25 +46853,28 @@ void ThriftHiveMetastoreClient::recv_get_partition_values(PartitionValuesRespons if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_values failed: unknown result"); + if (result.__isset.o3) { + throw 
result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name failed: unknown result"); } -void ThriftHiveMetastoreClient::get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) +void ThriftHiveMetastoreClient::append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) { - send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts); - recv_get_partitions_ps(_return); + send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context); + recv_append_partition_by_name_with_environment_context(_return); } -void ThriftHiveMetastoreClient::send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) +void ThriftHiveMetastoreClient::send_append_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_partitions_ps", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_ps_pargs args; + ThriftHiveMetastore_append_partition_by_name_with_environment_context_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.max_parts = &max_parts; + args.part_name = &part_name; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46669,7 +46882,7 @@ void ThriftHiveMetastoreClient::send_get_partitions_ps(const std::string& db_nam oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partitions_ps(std::vector & _return) +void ThriftHiveMetastoreClient::recv_append_partition_by_name_with_environment_context(Partition& _return) { int32_t rseqid = 0; @@ -46689,12 +46902,12 @@ void ThriftHiveMetastoreClient::recv_get_partitions_ps(std::vector & iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_ps") != 0) { + if (fname.compare("append_partition_by_name_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partitions_ps_presult result; + ThriftHiveMetastore_append_partition_by_name_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -46710,27 +46923,28 @@ void ThriftHiveMetastoreClient::recv_get_partitions_ps(std::vector & if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_ps failed: unknown result"); + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name_with_environment_context failed: unknown result"); } -void ThriftHiveMetastoreClient::get_partitions_ps_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & 
part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) +bool ThriftHiveMetastoreClient::drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) { - send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names); - recv_get_partitions_ps_with_auth(_return); + send_drop_partition(db_name, tbl_name, part_vals, deleteData); + return recv_drop_partition(); } -void ThriftHiveMetastoreClient::send_get_partitions_ps_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreClient::send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_partitions_ps_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_ps_with_auth_pargs args; + ThriftHiveMetastore_drop_partition_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; args.part_vals = &part_vals; - args.max_parts = &max_parts; - args.user_name = &user_name; - args.group_names = &group_names; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46738,7 +46952,7 @@ void ThriftHiveMetastoreClient::send_get_partitions_ps_with_auth(const std::stri oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partitions_ps_with_auth(std::vector & _return) +bool ThriftHiveMetastoreClient::recv_drop_partition() { int32_t rseqid = 0; @@ -46758,20 +46972,20 @@ void ThriftHiveMetastoreClient::recv_get_partitions_ps_with_auth(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_ps_with_auth") != 0) { + if (fname.compare("drop_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partitions_ps_with_auth_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } if (result.__isset.o1) { throw result.o1; @@ -46779,25 +46993,26 @@ void ThriftHiveMetastoreClient::recv_get_partitions_ps_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) +bool ThriftHiveMetastoreClient::drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context) { - send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts); - recv_get_partition_names_ps(_return); + send_drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, environment_context); + return recv_drop_partition_with_environment_context(); } -void ThriftHiveMetastoreClient::send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) +void 
ThriftHiveMetastoreClient::send_drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_partition_names_ps", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_names_ps_pargs args; + ThriftHiveMetastore_drop_partition_with_environment_context_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; args.part_vals = &part_vals; - args.max_parts = &max_parts; + args.deleteData = &deleteData; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46805,7 +47020,7 @@ void ThriftHiveMetastoreClient::send_get_partition_names_ps(const std::string& d oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partition_names_ps(std::vector & _return) +bool ThriftHiveMetastoreClient::recv_drop_partition_with_environment_context() { int32_t rseqid = 0; @@ -46825,20 +47040,20 @@ void ThriftHiveMetastoreClient::recv_get_partition_names_ps(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_names_ps") != 0) { + if (fname.compare("drop_partition_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partition_names_ps_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } if (result.__isset.o1) { throw result.o1; @@ -46846,25 +47061,25 @@ void ThriftHiveMetastoreClient::recv_get_partition_names_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) +bool ThriftHiveMetastoreClient::drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) { - send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts); - recv_get_partitions_by_filter(_return); + send_drop_partition_by_name(db_name, tbl_name, part_name, deleteData); + return recv_drop_partition_by_name(); } -void ThriftHiveMetastoreClient::send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) +void ThriftHiveMetastoreClient::send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_by_filter_pargs args; + ThriftHiveMetastore_drop_partition_by_name_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.filter = &filter; - args.max_parts = &max_parts; + args.part_name = &part_name; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46872,7 +47087,7 @@ void 
ThriftHiveMetastoreClient::send_get_partitions_by_filter(const std::string& oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partitions_by_filter(std::vector & _return) +bool ThriftHiveMetastoreClient::recv_drop_partition_by_name() { int32_t rseqid = 0; @@ -46892,20 +47107,20 @@ void ThriftHiveMetastoreClient::recv_get_partitions_by_filter(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_by_filter") != 0) { + if (fname.compare("drop_partition_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partitions_by_filter_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } if (result.__isset.o1) { throw result.o1; @@ -46913,25 +47128,26 @@ void ThriftHiveMetastoreClient::recv_get_partitions_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts) +bool ThriftHiveMetastoreClient::drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) { - send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts); - recv_get_part_specs_by_filter(_return); + send_drop_partition_by_name_with_environment_context(db_name, tbl_name, part_name, deleteData, environment_context); + return recv_drop_partition_by_name_with_environment_context(); } -void ThriftHiveMetastoreClient::send_get_part_specs_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts) +void ThriftHiveMetastoreClient::send_drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_part_specs_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_part_specs_by_filter_pargs args; + ThriftHiveMetastore_drop_partition_by_name_with_environment_context_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.filter = &filter; - args.max_parts = &max_parts; + args.part_name = &part_name; + args.deleteData = &deleteData; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46939,7 +47155,7 @@ void ThriftHiveMetastoreClient::send_get_part_specs_by_filter(const std::string& oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_part_specs_by_filter(std::vector & _return) +bool ThriftHiveMetastoreClient::recv_drop_partition_by_name_with_environment_context() { int32_t rseqid = 0; @@ -46959,20 +47175,20 @@ void ThriftHiveMetastoreClient::recv_get_part_specs_by_filter(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_part_specs_by_filter") != 0) { + if (fname.compare("drop_partition_by_name_with_environment_context") != 0) { 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_part_specs_by_filter_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_by_name_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } if (result.__isset.o1) { throw result.o1; @@ -46980,21 +47196,21 @@ void ThriftHiveMetastoreClient::recv_get_part_specs_by_filter(std::vectorwriteMessageBegin("get_partitions_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_by_expr_pargs args; + ThriftHiveMetastore_drop_partitions_req_pargs args; args.req = &req; args.write(oprot_); @@ -47003,7 +47219,7 @@ void ThriftHiveMetastoreClient::send_get_partitions_by_expr(const PartitionsByEx oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partitions_by_expr(PartitionsByExprResult& _return) +void ThriftHiveMetastoreClient::recv_drop_partitions_req(DropPartitionsResult& _return) { int32_t rseqid = 0; @@ -47023,12 +47239,12 @@ void ThriftHiveMetastoreClient::recv_get_partitions_by_expr(PartitionsByExprResu iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_by_expr") != 0) { + if (fname.compare("drop_partitions_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partitions_by_expr_presult result; + ThriftHiveMetastore_drop_partitions_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -47044,24 +47260,24 @@ void ThriftHiveMetastoreClient::recv_get_partitions_by_expr(PartitionsByExprResu if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_expr failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partitions_req failed: unknown result"); } -int32_t ThriftHiveMetastoreClient::get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter) +void ThriftHiveMetastoreClient::get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) { - send_get_num_partitions_by_filter(db_name, tbl_name, filter); - return recv_get_num_partitions_by_filter(); + send_get_partition(db_name, tbl_name, part_vals); + recv_get_partition(_return); } -void ThriftHiveMetastoreClient::send_get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter) +void ThriftHiveMetastoreClient::send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_num_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_num_partitions_by_filter_pargs args; + ThriftHiveMetastore_get_partition_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.filter = &filter; + 
args.part_vals = &part_vals; args.write(oprot_); oprot_->writeMessageEnd(); @@ -47069,7 +47285,7 @@ void ThriftHiveMetastoreClient::send_get_num_partitions_by_filter(const std::str oprot_->getTransport()->flush(); } -int32_t ThriftHiveMetastoreClient::recv_get_num_partitions_by_filter() +void ThriftHiveMetastoreClient::recv_get_partition(Partition& _return) { int32_t rseqid = 0; @@ -47089,20 +47305,20 @@ int32_t ThriftHiveMetastoreClient::recv_get_num_partitions_by_filter() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_num_partitions_by_filter") != 0) { + if (fname.compare("get_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - int32_t _return; - ThriftHiveMetastore_get_num_partitions_by_filter_presult result; + ThriftHiveMetastore_get_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - return _return; + // _return pointer has now been filled + return; } if (result.__isset.o1) { throw result.o1; @@ -47110,24 +47326,26 @@ int32_t ThriftHiveMetastoreClient::recv_get_num_partitions_by_filter() if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_num_partitions_by_filter failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition failed: unknown result"); } -void ThriftHiveMetastoreClient::get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names) +void ThriftHiveMetastoreClient::exchange_partition(Partition& _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) { - send_get_partitions_by_names(db_name, tbl_name, names); - recv_get_partitions_by_names(_return); + send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); + recv_exchange_partition(_return); } -void ThriftHiveMetastoreClient::send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector & names) +void ThriftHiveMetastoreClient::send_exchange_partition(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_partitions_by_names", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("exchange_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_by_names_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.names = &names; + ThriftHiveMetastore_exchange_partition_pargs args; + args.partitionSpecs = &partitionSpecs; + args.source_db = &source_db; + args.source_table_name = &source_table_name; + args.dest_db = &dest_db; + args.dest_table_name = &dest_table_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -47135,7 +47353,7 @@ void ThriftHiveMetastoreClient::send_get_partitions_by_names(const std::string& oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partitions_by_names(std::vector & _return) +void ThriftHiveMetastoreClient::recv_exchange_partition(Partition& _return) { 
int32_t rseqid = 0; @@ -47155,12 +47373,12 @@ void ThriftHiveMetastoreClient::recv_get_partitions_by_names(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_by_names") != 0) { + if (fname.compare("exchange_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partitions_by_names_presult result; + ThriftHiveMetastore_exchange_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -47176,24 +47394,32 @@ void ThriftHiveMetastoreClient::recv_get_partitions_by_names(std::vector & _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) { - send_alter_partition(db_name, tbl_name, new_part); - recv_alter_partition(); + send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); + recv_exchange_partitions(_return); } -void ThriftHiveMetastoreClient::send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) +void ThriftHiveMetastoreClient::send_exchange_partitions(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("alter_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("exchange_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_partition_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.new_part = &new_part; + ThriftHiveMetastore_exchange_partitions_pargs args; + args.partitionSpecs = &partitionSpecs; + args.source_db = &source_db; + args.source_table_name = &source_table_name; + args.dest_db = &dest_db; + args.dest_table_name = &dest_table_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -47201,7 +47427,7 @@ void ThriftHiveMetastoreClient::send_alter_partition(const std::string& db_name, oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_alter_partition() +void ThriftHiveMetastoreClient::recv_exchange_partitions(std::vector & _return) { int32_t rseqid = 0; @@ -47221,40 +47447,53 @@ void ThriftHiveMetastoreClient::recv_alter_partition() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_partition") != 0) { + if (fname.compare("exchange_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_alter_partition_presult result; + ThriftHiveMetastore_exchange_partitions_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - return; + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "exchange_partitions failed: unknown result"); } -void ThriftHiveMetastoreClient::alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) +void 
ThriftHiveMetastoreClient::get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names) { - send_alter_partitions(db_name, tbl_name, new_parts); - recv_alter_partitions(); + send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names); + recv_get_partition_with_auth(_return); } -void ThriftHiveMetastoreClient::send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) +void ThriftHiveMetastoreClient::send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names) { int32_t cseqid = 0; - oprot_->writeMessageBegin("alter_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_partitions_pargs args; + ThriftHiveMetastore_get_partition_with_auth_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.new_parts = &new_parts; + args.part_vals = &part_vals; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -47262,7 +47501,7 @@ void ThriftHiveMetastoreClient::send_alter_partitions(const std::string& db_name oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_alter_partitions() +void ThriftHiveMetastoreClient::recv_get_partition_with_auth(Partition& _return) { int32_t rseqid = 0; @@ -47282,41 +47521,45 @@ void ThriftHiveMetastoreClient::recv_alter_partitions() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_partitions") != 0) { + if (fname.compare("get_partition_with_auth") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_alter_partitions_presult result; + ThriftHiveMetastore_get_partition_with_auth_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_with_auth failed: unknown result"); } -void ThriftHiveMetastoreClient::alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) { - send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); - recv_alter_partitions_with_environment_context(); + send_get_partition_by_name(db_name, tbl_name, part_name); + recv_get_partition_by_name(_return); } -void ThriftHiveMetastoreClient::send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const 
std::string& part_name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_partitions_with_environment_context_pargs args; + ThriftHiveMetastore_get_partition_by_name_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.new_parts = &new_parts; - args.environment_context = &environment_context; + args.part_name = &part_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -47324,7 +47567,7 @@ void ThriftHiveMetastoreClient::send_alter_partitions_with_environment_context(c oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_alter_partitions_with_environment_context() +void ThriftHiveMetastoreClient::recv_get_partition_by_name(Partition& _return) { int32_t rseqid = 0; @@ -47344,41 +47587,45 @@ void ThriftHiveMetastoreClient::recv_alter_partitions_with_environment_context() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_partitions_with_environment_context") != 0) { + if (fname.compare("get_partition_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_alter_partitions_with_environment_context_presult result; + ThriftHiveMetastore_get_partition_by_name_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_by_name failed: unknown result"); } -void ThriftHiveMetastoreClient::alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::get_partitions(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) { - send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context); - recv_alter_partition_with_environment_context(); + send_get_partitions(db_name, tbl_name, max_parts); + recv_get_partitions(_return); } -void ThriftHiveMetastoreClient::send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreClient::send_get_partitions(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) { int32_t cseqid = 0; - oprot_->writeMessageBegin("alter_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_partition_with_environment_context_pargs args; + ThriftHiveMetastore_get_partitions_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.new_part = &new_part; - args.environment_context = &environment_context; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -47386,7 +47633,7 @@ void ThriftHiveMetastoreClient::send_alter_partition_with_environment_context(co 
oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_alter_partition_with_environment_context() +void ThriftHiveMetastoreClient::recv_get_partitions(std::vector & _return) { int32_t rseqid = 0; @@ -47406,41 +47653,47 @@ void ThriftHiveMetastoreClient::recv_alter_partition_with_environment_context() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_partition_with_environment_context") != 0) { + if (fname.compare("get_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_alter_partition_with_environment_context_presult result; + ThriftHiveMetastore_get_partitions_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions failed: unknown result"); } -void ThriftHiveMetastoreClient::rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) +void ThriftHiveMetastoreClient::get_partitions_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) { - send_rename_partition(db_name, tbl_name, part_vals, new_part); - recv_rename_partition(); + send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names); + recv_get_partitions_with_auth(_return); } -void ThriftHiveMetastoreClient::send_rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) +void ThriftHiveMetastoreClient::send_get_partitions_with_auth(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) { int32_t cseqid = 0; - oprot_->writeMessageBegin("rename_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_rename_partition_pargs args; + ThriftHiveMetastore_get_partitions_with_auth_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.new_part = &new_part; + args.max_parts = &max_parts; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -47448,7 +47701,7 @@ void ThriftHiveMetastoreClient::send_rename_partition(const std::string& db_name oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_rename_partition() +void ThriftHiveMetastoreClient::recv_get_partitions_with_auth(std::vector & _return) { int32_t rseqid = 0; @@ -47468,39 +47721,45 @@ void ThriftHiveMetastoreClient::recv_rename_partition() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("rename_partition") != 0) { + if (fname.compare("get_partitions_with_auth") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_rename_partition_presult result; + ThriftHiveMetastore_get_partitions_with_auth_presult 
result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_with_auth failed: unknown result"); } -bool ThriftHiveMetastoreClient::partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception) +void ThriftHiveMetastoreClient::get_partitions_pspec(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) { - send_partition_name_has_valid_characters(part_vals, throw_exception); - return recv_partition_name_has_valid_characters(); + send_get_partitions_pspec(db_name, tbl_name, max_parts); + recv_get_partitions_pspec(_return); } -void ThriftHiveMetastoreClient::send_partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception) +void ThriftHiveMetastoreClient::send_get_partitions_pspec(const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) { int32_t cseqid = 0; - oprot_->writeMessageBegin("partition_name_has_valid_characters", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_partition_name_has_valid_characters_pargs args; - args.part_vals = &part_vals; - args.throw_exception = &throw_exception; + ThriftHiveMetastore_get_partitions_pspec_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -47508,7 +47767,7 @@ void ThriftHiveMetastoreClient::send_partition_name_has_valid_characters(const s oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_partition_name_has_valid_characters() +void ThriftHiveMetastoreClient::recv_get_partitions_pspec(std::vector & _return) { int32_t rseqid = 0; @@ -47528,41 +47787,45 @@ bool ThriftHiveMetastoreClient::recv_partition_name_has_valid_characters() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("partition_name_has_valid_characters") != 0) { + if (fname.compare("get_partitions_pspec") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_partition_name_has_valid_characters_presult result; + ThriftHiveMetastore_get_partitions_pspec_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - return _return; + // _return pointer has now been filled + return; } if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_has_valid_characters failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_pspec failed: unknown result"); } -void ThriftHiveMetastoreClient::get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue) +void ThriftHiveMetastoreClient::get_partition_names(std::vector & _return, const std::string& db_name, const 
std::string& tbl_name, const int16_t max_parts) { - send_get_config_value(name, defaultValue); - recv_get_config_value(_return); + send_get_partition_names(db_name, tbl_name, max_parts); + recv_get_partition_names(_return); } -void ThriftHiveMetastoreClient::send_get_config_value(const std::string& name, const std::string& defaultValue) +void ThriftHiveMetastoreClient::send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_config_value", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_names", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_config_value_pargs args; - args.name = &name; - args.defaultValue = &defaultValue; + ThriftHiveMetastore_get_partition_names_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -47570,7 +47833,7 @@ void ThriftHiveMetastoreClient::send_get_config_value(const std::string& name, c oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_config_value(std::string& _return) +void ThriftHiveMetastoreClient::recv_get_partition_names(std::vector & _return) { int32_t rseqid = 0; @@ -47590,12 +47853,12 @@ void ThriftHiveMetastoreClient::recv_get_config_value(std::string& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_config_value") != 0) { + if (fname.compare("get_partition_names") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_config_value_presult result; + ThriftHiveMetastore_get_partition_names_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -47608,22 +47871,25 @@ void ThriftHiveMetastoreClient::recv_get_config_value(std::string& _return) if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_config_value failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names failed: unknown result"); } -void ThriftHiveMetastoreClient::partition_name_to_vals(std::vector & _return, const std::string& part_name) +void ThriftHiveMetastoreClient::get_partition_values(PartitionValuesResponse& _return, const PartitionValuesRequest& request) { - send_partition_name_to_vals(part_name); - recv_partition_name_to_vals(_return); + send_get_partition_values(request); + recv_get_partition_values(_return); } -void ThriftHiveMetastoreClient::send_partition_name_to_vals(const std::string& part_name) +void ThriftHiveMetastoreClient::send_get_partition_values(const PartitionValuesRequest& request) { int32_t cseqid = 0; - oprot_->writeMessageBegin("partition_name_to_vals", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_values", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_partition_name_to_vals_pargs args; - args.part_name = &part_name; + ThriftHiveMetastore_get_partition_values_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -47631,7 +47897,7 @@ void ThriftHiveMetastoreClient::send_partition_name_to_vals(const std::string& p oprot_->getTransport()->flush(); } -void 
ThriftHiveMetastoreClient::recv_partition_name_to_vals(std::vector & _return) +void ThriftHiveMetastoreClient::recv_get_partition_values(PartitionValuesResponse& _return) { int32_t rseqid = 0; @@ -47651,12 +47917,12 @@ void ThriftHiveMetastoreClient::recv_partition_name_to_vals(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("partition_name_to_vals") != 0) { + if (fname.compare("get_partition_values") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_partition_name_to_vals_presult result; + ThriftHiveMetastore_get_partition_values_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -47669,22 +47935,28 @@ void ThriftHiveMetastoreClient::recv_partition_name_to_vals(std::vector & _return, const std::string& part_name) +void ThriftHiveMetastoreClient::get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) { - send_partition_name_to_spec(part_name); - recv_partition_name_to_spec(_return); + send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts); + recv_get_partitions_ps(_return); } -void ThriftHiveMetastoreClient::send_partition_name_to_spec(const std::string& part_name) +void ThriftHiveMetastoreClient::send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) { int32_t cseqid = 0; - oprot_->writeMessageBegin("partition_name_to_spec", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_ps", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_partition_name_to_spec_pargs args; - args.part_name = &part_name; + ThriftHiveMetastore_get_partitions_ps_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -47692,7 +47964,7 @@ void ThriftHiveMetastoreClient::send_partition_name_to_spec(const std::string& p oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_partition_name_to_spec(std::map & _return) +void ThriftHiveMetastoreClient::recv_get_partitions_ps(std::vector & _return) { int32_t rseqid = 0; @@ -47712,12 +47984,12 @@ void ThriftHiveMetastoreClient::recv_partition_name_to_spec(std::mapreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("partition_name_to_spec") != 0) { + if (fname.compare("get_partitions_ps") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_partition_name_to_spec_presult result; + ThriftHiveMetastore_get_partitions_ps_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -47730,25 +48002,30 @@ void ThriftHiveMetastoreClient::recv_partition_name_to_spec(std::map & part_vals, const PartitionEventType::type eventType) +void ThriftHiveMetastoreClient::get_partitions_ps_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) { - send_markPartitionForEvent(db_name, tbl_name, part_vals, eventType); - recv_markPartitionForEvent(); + send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names); + 
recv_get_partitions_ps_with_auth(_return); } -void ThriftHiveMetastoreClient::send_markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) +void ThriftHiveMetastoreClient::send_get_partitions_ps_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) { int32_t cseqid = 0; - oprot_->writeMessageBegin("markPartitionForEvent", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_ps_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_markPartitionForEvent_pargs args; + ThriftHiveMetastore_get_partitions_ps_with_auth_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; args.part_vals = &part_vals; - args.eventType = &eventType; + args.max_parts = &max_parts; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -47756,7 +48033,7 @@ void ThriftHiveMetastoreClient::send_markPartitionForEvent(const std::string& db oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_markPartitionForEvent() +void ThriftHiveMetastoreClient::recv_get_partitions_ps_with_auth(std::vector & _return) { int32_t rseqid = 0; @@ -47776,53 +48053,46 @@ void ThriftHiveMetastoreClient::recv_markPartitionForEvent() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("markPartitionForEvent") != 0) { + if (fname.compare("get_partitions_ps_with_auth") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_markPartitionForEvent_presult result; + ThriftHiveMetastore_get_partitions_ps_with_auth_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - if (result.__isset.o3) { - throw result.o3; - } - if (result.__isset.o4) { - throw result.o4; - } - if (result.__isset.o5) { - throw result.o5; - } - if (result.__isset.o6) { - throw result.o6; - } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_ps_with_auth failed: unknown result"); } -bool ThriftHiveMetastoreClient::isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) +void ThriftHiveMetastoreClient::get_partition_names_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) { - send_isPartitionMarkedForEvent(db_name, tbl_name, part_vals, eventType); - return recv_isPartitionMarkedForEvent(); + send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts); + recv_get_partition_names_ps(_return); } -void ThriftHiveMetastoreClient::send_isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) +void ThriftHiveMetastoreClient::send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) { int32_t cseqid = 0; - 
oprot_->writeMessageBegin("isPartitionMarkedForEvent", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_names_ps", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_isPartitionMarkedForEvent_pargs args; + ThriftHiveMetastore_get_partition_names_ps_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; args.part_vals = &part_vals; - args.eventType = &eventType; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -47830,7 +48100,7 @@ void ThriftHiveMetastoreClient::send_isPartitionMarkedForEvent(const std::string oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_isPartitionMarkedForEvent() +void ThriftHiveMetastoreClient::recv_get_partition_names_ps(std::vector & _return) { int32_t rseqid = 0; @@ -47850,20 +48120,20 @@ bool ThriftHiveMetastoreClient::recv_isPartitionMarkedForEvent() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("isPartitionMarkedForEvent") != 0) { + if (fname.compare("get_partition_names_ps") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_isPartitionMarkedForEvent_presult result; + ThriftHiveMetastore_get_partition_names_ps_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - return _return; + // _return pointer has now been filled + return; } if (result.__isset.o1) { throw result.o1; @@ -47871,35 +48141,25 @@ bool ThriftHiveMetastoreClient::recv_isPartitionMarkedForEvent() if (result.__isset.o2) { throw result.o2; } - if (result.__isset.o3) { - throw result.o3; - } - if (result.__isset.o4) { - throw result.o4; - } - if (result.__isset.o5) { - throw result.o5; - } - if (result.__isset.o6) { - throw result.o6; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "isPartitionMarkedForEvent failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names_ps failed: unknown result"); } -void ThriftHiveMetastoreClient::add_index(Index& _return, const Index& new_index, const Table& index_table) +void ThriftHiveMetastoreClient::get_partitions_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) { - send_add_index(new_index, index_table); - recv_add_index(_return); + send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts); + recv_get_partitions_by_filter(_return); } -void ThriftHiveMetastoreClient::send_add_index(const Index& new_index, const Table& index_table) +void ThriftHiveMetastoreClient::send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) { int32_t cseqid = 0; - oprot_->writeMessageBegin("add_index", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_index_pargs args; - args.new_index = &new_index; - args.index_table = &index_table; + ThriftHiveMetastore_get_partitions_by_filter_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.filter = &filter; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -47907,7 +48167,7 
@@ void ThriftHiveMetastoreClient::send_add_index(const Index& new_index, const Tab oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_add_index(Index& _return) +void ThriftHiveMetastoreClient::recv_get_partitions_by_filter(std::vector & _return) { int32_t rseqid = 0; @@ -47927,12 +48187,12 @@ void ThriftHiveMetastoreClient::recv_add_index(Index& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_index") != 0) { + if (fname.compare("get_partitions_by_filter") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_add_index_presult result; + ThriftHiveMetastore_get_partitions_by_filter_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -47948,28 +48208,25 @@ void ThriftHiveMetastoreClient::recv_add_index(Index& _return) if (result.__isset.o2) { throw result.o2; } - if (result.__isset.o3) { - throw result.o3; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_index failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_filter failed: unknown result"); } -void ThriftHiveMetastoreClient::alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) +void ThriftHiveMetastoreClient::get_part_specs_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts) { - send_alter_index(dbname, base_tbl_name, idx_name, new_idx); - recv_alter_index(); + send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts); + recv_get_part_specs_by_filter(_return); } -void ThriftHiveMetastoreClient::send_alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) +void ThriftHiveMetastoreClient::send_get_part_specs_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts) { int32_t cseqid = 0; - oprot_->writeMessageBegin("alter_index", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_part_specs_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_index_pargs args; - args.dbname = &dbname; - args.base_tbl_name = &base_tbl_name; - args.idx_name = &idx_name; - args.new_idx = &new_idx; + ThriftHiveMetastore_get_part_specs_by_filter_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.filter = &filter; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -47977,7 +48234,7 @@ void ThriftHiveMetastoreClient::send_alter_index(const std::string& dbname, cons oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_alter_index() +void ThriftHiveMetastoreClient::recv_get_part_specs_by_filter(std::vector & _return) { int32_t rseqid = 0; @@ -47997,41 +48254,43 @@ void ThriftHiveMetastoreClient::recv_alter_index() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_index") != 0) { + if (fname.compare("get_part_specs_by_filter") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_alter_index_presult result; + 
ThriftHiveMetastore_get_part_specs_by_filter_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_part_specs_by_filter failed: unknown result"); } -bool ThriftHiveMetastoreClient::drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) +void ThriftHiveMetastoreClient::get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req) { - send_drop_index_by_name(db_name, tbl_name, index_name, deleteData); - return recv_drop_index_by_name(); + send_get_partitions_by_expr(req); + recv_get_partitions_by_expr(_return); } -void ThriftHiveMetastoreClient::send_drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) +void ThriftHiveMetastoreClient::send_get_partitions_by_expr(const PartitionsByExprRequest& req) { int32_t cseqid = 0; - oprot_->writeMessageBegin("drop_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_index_by_name_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.index_name = &index_name; - args.deleteData = &deleteData; + ThriftHiveMetastore_get_partitions_by_expr_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -48039,7 +48298,7 @@ void ThriftHiveMetastoreClient::send_drop_index_by_name(const std::string& db_na oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_drop_index_by_name() +void ThriftHiveMetastoreClient::recv_get_partitions_by_expr(PartitionsByExprResult& _return) { int32_t rseqid = 0; @@ -48059,20 +48318,20 @@ bool ThriftHiveMetastoreClient::recv_drop_index_by_name() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_index_by_name") != 0) { + if (fname.compare("get_partitions_by_expr") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_drop_index_by_name_presult result; + ThriftHiveMetastore_get_partitions_by_expr_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - return _return; + // _return pointer has now been filled + return; } if (result.__isset.o1) { throw result.o1; @@ -48080,24 +48339,24 @@ bool ThriftHiveMetastoreClient::recv_drop_index_by_name() if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_index_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_expr failed: unknown result"); } -void ThriftHiveMetastoreClient::get_index_by_name(Index& _return, const std::string& db_name, const std::string& tbl_name, const std::string& index_name) +int32_t ThriftHiveMetastoreClient::get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const 
std::string& filter) { - send_get_index_by_name(db_name, tbl_name, index_name); - recv_get_index_by_name(_return); + send_get_num_partitions_by_filter(db_name, tbl_name, filter); + return recv_get_num_partitions_by_filter(); } -void ThriftHiveMetastoreClient::send_get_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name) +void ThriftHiveMetastoreClient::send_get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_num_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_index_by_name_pargs args; + ThriftHiveMetastore_get_num_partitions_by_filter_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.index_name = &index_name; + args.filter = &filter; args.write(oprot_); oprot_->writeMessageEnd(); @@ -48105,7 +48364,7 @@ void ThriftHiveMetastoreClient::send_get_index_by_name(const std::string& db_nam oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_index_by_name(Index& _return) +int32_t ThriftHiveMetastoreClient::recv_get_num_partitions_by_filter() { int32_t rseqid = 0; @@ -48125,20 +48384,20 @@ void ThriftHiveMetastoreClient::recv_get_index_by_name(Index& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_index_by_name") != 0) { + if (fname.compare("get_num_partitions_by_filter") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_index_by_name_presult result; + int32_t _return; + ThriftHiveMetastore_get_num_partitions_by_filter_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } if (result.__isset.o1) { throw result.o1; @@ -48146,24 +48405,24 @@ void ThriftHiveMetastoreClient::recv_get_index_by_name(Index& _return) if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_num_partitions_by_filter failed: unknown result"); } -void ThriftHiveMetastoreClient::get_indexes(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) +void ThriftHiveMetastoreClient::get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names) { - send_get_indexes(db_name, tbl_name, max_indexes); - recv_get_indexes(_return); + send_get_partitions_by_names(db_name, tbl_name, names); + recv_get_partitions_by_names(_return); } -void ThriftHiveMetastoreClient::send_get_indexes(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) +void ThriftHiveMetastoreClient::send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector & names) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_indexes", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_by_names", ::apache::thrift::protocol::T_CALL, cseqid); - 
ThriftHiveMetastore_get_indexes_pargs args; + ThriftHiveMetastore_get_partitions_by_names_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.max_indexes = &max_indexes; + args.names = &names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -48171,7 +48430,7 @@ void ThriftHiveMetastoreClient::send_get_indexes(const std::string& db_name, con oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_indexes(std::vector & _return) +void ThriftHiveMetastoreClient::recv_get_partitions_by_names(std::vector & _return) { int32_t rseqid = 0; @@ -48191,12 +48450,12 @@ void ThriftHiveMetastoreClient::recv_get_indexes(std::vector & _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_indexes") != 0) { + if (fname.compare("get_partitions_by_names") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_indexes_presult result; + ThriftHiveMetastore_get_partitions_by_names_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -48212,24 +48471,24 @@ void ThriftHiveMetastoreClient::recv_get_indexes(std::vector & _return) if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_indexes failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_names failed: unknown result"); } -void ThriftHiveMetastoreClient::get_index_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) +void ThriftHiveMetastoreClient::alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) { - send_get_index_names(db_name, tbl_name, max_indexes); - recv_get_index_names(_return); + send_alter_partition(db_name, tbl_name, new_part); + recv_alter_partition(); } -void ThriftHiveMetastoreClient::send_get_index_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) +void ThriftHiveMetastoreClient::send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_index_names", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_index_names_pargs args; + ThriftHiveMetastore_alter_partition_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.max_indexes = &max_indexes; + args.new_part = &new_part; args.write(oprot_); oprot_->writeMessageEnd(); @@ -48237,7 +48496,7 @@ void ThriftHiveMetastoreClient::send_get_index_names(const std::string& db_name, oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_index_names(std::vector & _return) +void ThriftHiveMetastoreClient::recv_alter_partition() { int32_t rseqid = 0; @@ -48257,40 +48516,40 @@ void ThriftHiveMetastoreClient::recv_get_index_names(std::vector & iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_index_names") != 0) { + if (fname.compare("alter_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_index_names_presult result; - result.success = &_return; + 
ThriftHiveMetastore_alter_partition_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; + if (result.__isset.o1) { + throw result.o1; } if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_names failed: unknown result"); + return; } -void ThriftHiveMetastoreClient::get_primary_keys(PrimaryKeysResponse& _return, const PrimaryKeysRequest& request) +void ThriftHiveMetastoreClient::alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) { - send_get_primary_keys(request); - recv_get_primary_keys(_return); + send_alter_partitions(db_name, tbl_name, new_parts); + recv_alter_partitions(); } -void ThriftHiveMetastoreClient::send_get_primary_keys(const PrimaryKeysRequest& request) +void ThriftHiveMetastoreClient::send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_primary_keys_pargs args; - args.request = &request; + ThriftHiveMetastore_alter_partitions_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.new_parts = &new_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -48298,7 +48557,7 @@ void ThriftHiveMetastoreClient::send_get_primary_keys(const PrimaryKeysRequest& oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_primary_keys(PrimaryKeysResponse& _return) +void ThriftHiveMetastoreClient::recv_alter_partitions() { int32_t rseqid = 0; @@ -48318,43 +48577,41 @@ void ThriftHiveMetastoreClient::recv_get_primary_keys(PrimaryKeysResponse& _retu iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_primary_keys") != 0) { + if (fname.compare("alter_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_primary_keys_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_partitions_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_primary_keys failed: unknown result"); + return; } -void ThriftHiveMetastoreClient::get_foreign_keys(ForeignKeysResponse& _return, const ForeignKeysRequest& request) +void ThriftHiveMetastoreClient::alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) { - send_get_foreign_keys(request); - recv_get_foreign_keys(_return); + send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); + recv_alter_partitions_with_environment_context(); } -void ThriftHiveMetastoreClient::send_get_foreign_keys(const ForeignKeysRequest& request) +void ThriftHiveMetastoreClient::send_alter_partitions_with_environment_context(const 
std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_foreign_keys_pargs args; - args.request = &request; + ThriftHiveMetastore_alter_partitions_with_environment_context_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.new_parts = &new_parts; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -48362,7 +48619,7 @@ void ThriftHiveMetastoreClient::send_get_foreign_keys(const ForeignKeysRequest& oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_foreign_keys(ForeignKeysResponse& _return) +void ThriftHiveMetastoreClient::recv_alter_partitions_with_environment_context() { int32_t rseqid = 0; @@ -48382,43 +48639,41 @@ void ThriftHiveMetastoreClient::recv_get_foreign_keys(ForeignKeysResponse& _retu iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_foreign_keys") != 0) { + if (fname.compare("alter_partitions_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_foreign_keys_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_partitions_with_environment_context_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_foreign_keys failed: unknown result"); + return; } -void ThriftHiveMetastoreClient::get_unique_constraints(UniqueConstraintsResponse& _return, const UniqueConstraintsRequest& request) +void ThriftHiveMetastoreClient::alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) { - send_get_unique_constraints(request); - recv_get_unique_constraints(_return); + send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context); + recv_alter_partition_with_environment_context(); } -void ThriftHiveMetastoreClient::send_get_unique_constraints(const UniqueConstraintsRequest& request) +void ThriftHiveMetastoreClient::send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_unique_constraints", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_unique_constraints_pargs args; - args.request = &request; + ThriftHiveMetastore_alter_partition_with_environment_context_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.new_part = &new_part; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -48426,7 +48681,7 @@ void 
ThriftHiveMetastoreClient::send_get_unique_constraints(const UniqueConstrai oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_unique_constraints(UniqueConstraintsResponse& _return) +void ThriftHiveMetastoreClient::recv_alter_partition_with_environment_context() { int32_t rseqid = 0; @@ -48446,43 +48701,41 @@ void ThriftHiveMetastoreClient::recv_get_unique_constraints(UniqueConstraintsRes iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_unique_constraints") != 0) { + if (fname.compare("alter_partition_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_unique_constraints_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_partition_with_environment_context_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_unique_constraints failed: unknown result"); + return; } -void ThriftHiveMetastoreClient::get_not_null_constraints(NotNullConstraintsResponse& _return, const NotNullConstraintsRequest& request) +void ThriftHiveMetastoreClient::rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) { - send_get_not_null_constraints(request); - recv_get_not_null_constraints(_return); + send_rename_partition(db_name, tbl_name, part_vals, new_part); + recv_rename_partition(); } -void ThriftHiveMetastoreClient::send_get_not_null_constraints(const NotNullConstraintsRequest& request) +void ThriftHiveMetastoreClient::send_rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_not_null_constraints", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("rename_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_not_null_constraints_pargs args; - args.request = &request; + ThriftHiveMetastore_rename_partition_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.new_part = &new_part; args.write(oprot_); oprot_->writeMessageEnd(); @@ -48490,7 +48743,7 @@ void ThriftHiveMetastoreClient::send_get_not_null_constraints(const NotNullConst oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_not_null_constraints(NotNullConstraintsResponse& _return) +void ThriftHiveMetastoreClient::recv_rename_partition() { int32_t rseqid = 0; @@ -48510,43 +48763,39 @@ void ThriftHiveMetastoreClient::recv_get_not_null_constraints(NotNullConstraints iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_not_null_constraints") != 0) { + if (fname.compare("rename_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_not_null_constraints_presult result; - result.success = &_return; + ThriftHiveMetastore_rename_partition_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) 
{ - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_not_null_constraints failed: unknown result"); + return; } -bool ThriftHiveMetastoreClient::update_table_column_statistics(const ColumnStatistics& stats_obj) +bool ThriftHiveMetastoreClient::partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception) { - send_update_table_column_statistics(stats_obj); - return recv_update_table_column_statistics(); + send_partition_name_has_valid_characters(part_vals, throw_exception); + return recv_partition_name_has_valid_characters(); } -void ThriftHiveMetastoreClient::send_update_table_column_statistics(const ColumnStatistics& stats_obj) +void ThriftHiveMetastoreClient::send_partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception) { int32_t cseqid = 0; - oprot_->writeMessageBegin("update_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("partition_name_has_valid_characters", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_update_table_column_statistics_pargs args; - args.stats_obj = &stats_obj; + ThriftHiveMetastore_partition_name_has_valid_characters_pargs args; + args.part_vals = &part_vals; + args.throw_exception = &throw_exception; args.write(oprot_); oprot_->writeMessageEnd(); @@ -48554,7 +48803,7 @@ void ThriftHiveMetastoreClient::send_update_table_column_statistics(const Column oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_update_table_column_statistics() +bool ThriftHiveMetastoreClient::recv_partition_name_has_valid_characters() { int32_t rseqid = 0; @@ -48574,13 +48823,13 @@ bool ThriftHiveMetastoreClient::recv_update_table_column_statistics() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("update_table_column_statistics") != 0) { + if (fname.compare("partition_name_has_valid_characters") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } bool _return; - ThriftHiveMetastore_update_table_column_statistics_presult result; + ThriftHiveMetastore_partition_name_has_valid_characters_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -48592,31 +48841,23 @@ bool ThriftHiveMetastoreClient::recv_update_table_column_statistics() if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - if (result.__isset.o4) { - throw result.o4; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_table_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_has_valid_characters failed: unknown result"); } -bool ThriftHiveMetastoreClient::update_partition_column_statistics(const ColumnStatistics& stats_obj) +void ThriftHiveMetastoreClient::get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue) { - send_update_partition_column_statistics(stats_obj); - return recv_update_partition_column_statistics(); + send_get_config_value(name, defaultValue); + recv_get_config_value(_return); } -void 
ThriftHiveMetastoreClient::send_update_partition_column_statistics(const ColumnStatistics& stats_obj) +void ThriftHiveMetastoreClient::send_get_config_value(const std::string& name, const std::string& defaultValue) { int32_t cseqid = 0; - oprot_->writeMessageBegin("update_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_config_value", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_update_partition_column_statistics_pargs args; - args.stats_obj = &stats_obj; + ThriftHiveMetastore_get_config_value_pargs args; + args.name = &name; + args.defaultValue = &defaultValue; args.write(oprot_); oprot_->writeMessageEnd(); @@ -48624,7 +48865,7 @@ void ThriftHiveMetastoreClient::send_update_partition_column_statistics(const Co oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_update_partition_column_statistics() +void ThriftHiveMetastoreClient::recv_get_config_value(std::string& _return) { int32_t rseqid = 0; @@ -48644,51 +48885,40 @@ bool ThriftHiveMetastoreClient::recv_update_partition_column_statistics() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("update_partition_column_statistics") != 0) { + if (fname.compare("get_config_value") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_update_partition_column_statistics_presult result; + ThriftHiveMetastore_get_config_value_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - return _return; + // _return pointer has now been filled + return; } if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - if (result.__isset.o4) { - throw result.o4; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_partition_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_config_value failed: unknown result"); } -void ThriftHiveMetastoreClient::get_table_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +void ThriftHiveMetastoreClient::partition_name_to_vals(std::vector & _return, const std::string& part_name) { - send_get_table_column_statistics(db_name, tbl_name, col_name); - recv_get_table_column_statistics(_return); + send_partition_name_to_vals(part_name); + recv_partition_name_to_vals(_return); } -void ThriftHiveMetastoreClient::send_get_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +void ThriftHiveMetastoreClient::send_partition_name_to_vals(const std::string& part_name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("partition_name_to_vals", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_column_statistics_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.col_name = &col_name; + ThriftHiveMetastore_partition_name_to_vals_pargs args; + args.part_name = &part_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -48696,7 +48926,7 @@ void 
ThriftHiveMetastoreClient::send_get_table_column_statistics(const std::stri oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_table_column_statistics(ColumnStatistics& _return) +void ThriftHiveMetastoreClient::recv_partition_name_to_vals(std::vector & _return) { int32_t rseqid = 0; @@ -48716,12 +48946,12 @@ void ThriftHiveMetastoreClient::recv_get_table_column_statistics(ColumnStatistic iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_column_statistics") != 0) { + if (fname.compare("partition_name_to_vals") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_table_column_statistics_presult result; + ThriftHiveMetastore_partition_name_to_vals_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -48734,34 +48964,22 @@ void ThriftHiveMetastoreClient::recv_get_table_column_statistics(ColumnStatistic if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - if (result.__isset.o4) { - throw result.o4; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_to_vals failed: unknown result"); } -void ThriftHiveMetastoreClient::get_partition_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) +void ThriftHiveMetastoreClient::partition_name_to_spec(std::map & _return, const std::string& part_name) { - send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name); - recv_get_partition_column_statistics(_return); + send_partition_name_to_spec(part_name); + recv_partition_name_to_spec(_return); } -void ThriftHiveMetastoreClient::send_get_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) +void ThriftHiveMetastoreClient::send_partition_name_to_spec(const std::string& part_name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("partition_name_to_spec", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_column_statistics_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; + ThriftHiveMetastore_partition_name_to_spec_pargs args; args.part_name = &part_name; - args.col_name = &col_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -48769,7 +48987,7 @@ void ThriftHiveMetastoreClient::send_get_partition_column_statistics(const std:: oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partition_column_statistics(ColumnStatistics& _return) +void ThriftHiveMetastoreClient::recv_partition_name_to_spec(std::map & _return) { int32_t rseqid = 0; @@ -48789,12 +49007,12 @@ void ThriftHiveMetastoreClient::recv_get_partition_column_statistics(ColumnStati iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_column_statistics") != 0) { + if (fname.compare("partition_name_to_spec") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); 
iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partition_column_statistics_presult result; + ThriftHiveMetastore_partition_name_to_spec_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -48807,31 +49025,25 @@ void ThriftHiveMetastoreClient::recv_get_partition_column_statistics(ColumnStati if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - if (result.__isset.o4) { - throw result.o4; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_to_spec failed: unknown result"); } -void ThriftHiveMetastoreClient::get_table_statistics_req(TableStatsResult& _return, const TableStatsRequest& request) +void ThriftHiveMetastoreClient::markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { - send_get_table_statistics_req(request); - recv_get_table_statistics_req(_return); + send_markPartitionForEvent(db_name, tbl_name, part_vals, eventType); + recv_markPartitionForEvent(); } -void ThriftHiveMetastoreClient::send_get_table_statistics_req(const TableStatsRequest& request) +void ThriftHiveMetastoreClient::send_markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_table_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("markPartitionForEvent", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_statistics_req_pargs args; - args.request = &request; + ThriftHiveMetastore_markPartitionForEvent_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.eventType = &eventType; args.write(oprot_); oprot_->writeMessageEnd(); @@ -48839,7 +49051,7 @@ void ThriftHiveMetastoreClient::send_get_table_statistics_req(const TableStatsRe oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_table_statistics_req(TableStatsResult& _return) +void ThriftHiveMetastoreClient::recv_markPartitionForEvent() { int32_t rseqid = 0; @@ -48859,43 +49071,53 @@ void ThriftHiveMetastoreClient::recv_get_table_statistics_req(TableStatsResult& iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_statistics_req") != 0) { + if (fname.compare("markPartitionForEvent") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_table_statistics_req_presult result; - result.success = &_return; + ThriftHiveMetastore_markPartitionForEvent_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_statistics_req failed: unknown result"); + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + if 
(result.__isset.o5) { + throw result.o5; + } + if (result.__isset.o6) { + throw result.o6; + } + return; } -void ThriftHiveMetastoreClient::get_partitions_statistics_req(PartitionsStatsResult& _return, const PartitionsStatsRequest& request) +bool ThriftHiveMetastoreClient::isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { - send_get_partitions_statistics_req(request); - recv_get_partitions_statistics_req(_return); + send_isPartitionMarkedForEvent(db_name, tbl_name, part_vals, eventType); + return recv_isPartitionMarkedForEvent(); } -void ThriftHiveMetastoreClient::send_get_partitions_statistics_req(const PartitionsStatsRequest& request) +void ThriftHiveMetastoreClient::send_isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_partitions_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("isPartitionMarkedForEvent", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_statistics_req_pargs args; - args.request = &request; + ThriftHiveMetastore_isPartitionMarkedForEvent_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.eventType = &eventType; args.write(oprot_); oprot_->writeMessageEnd(); @@ -48903,7 +49125,7 @@ void ThriftHiveMetastoreClient::send_get_partitions_statistics_req(const Partiti oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_partitions_statistics_req(PartitionsStatsResult& _return) +bool ThriftHiveMetastoreClient::recv_isPartitionMarkedForEvent() { int32_t rseqid = 0; @@ -48923,20 +49145,20 @@ void ThriftHiveMetastoreClient::recv_get_partitions_statistics_req(PartitionsSta iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_statistics_req") != 0) { + if (fname.compare("isPartitionMarkedForEvent") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_partitions_statistics_req_presult result; + bool _return; + ThriftHiveMetastore_isPartitionMarkedForEvent_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } if (result.__isset.o1) { throw result.o1; @@ -48944,22 +49166,35 @@ void ThriftHiveMetastoreClient::recv_get_partitions_statistics_req(PartitionsSta if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_statistics_req failed: unknown result"); + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + if (result.__isset.o5) { + throw result.o5; + } + if (result.__isset.o6) { + throw result.o6; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "isPartitionMarkedForEvent failed: unknown result"); } -void ThriftHiveMetastoreClient::get_aggr_stats_for(AggrStats& _return, const PartitionsStatsRequest& request) +void ThriftHiveMetastoreClient::add_index(Index& _return, const Index& new_index, const Table& index_table) { - send_get_aggr_stats_for(request); - 
recv_get_aggr_stats_for(_return); + send_add_index(new_index, index_table); + recv_add_index(_return); } -void ThriftHiveMetastoreClient::send_get_aggr_stats_for(const PartitionsStatsRequest& request) +void ThriftHiveMetastoreClient::send_add_index(const Index& new_index, const Table& index_table) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_index", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_aggr_stats_for_pargs args; - args.request = &request; + ThriftHiveMetastore_add_index_pargs args; + args.new_index = &new_index; + args.index_table = &index_table; args.write(oprot_); oprot_->writeMessageEnd(); @@ -48967,7 +49202,7 @@ void ThriftHiveMetastoreClient::send_get_aggr_stats_for(const PartitionsStatsReq oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_aggr_stats_for(AggrStats& _return) +void ThriftHiveMetastoreClient::recv_add_index(Index& _return) { int32_t rseqid = 0; @@ -48987,12 +49222,12 @@ void ThriftHiveMetastoreClient::recv_get_aggr_stats_for(AggrStats& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_aggr_stats_for") != 0) { + if (fname.compare("add_index") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_aggr_stats_for_presult result; + ThriftHiveMetastore_add_index_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -49008,22 +49243,28 @@ void ThriftHiveMetastoreClient::recv_get_aggr_stats_for(AggrStats& _return) if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_aggr_stats_for failed: unknown result"); + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_index failed: unknown result"); } -bool ThriftHiveMetastoreClient::set_aggr_stats_for(const SetPartitionsStatsRequest& request) +void ThriftHiveMetastoreClient::alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) { - send_set_aggr_stats_for(request); - return recv_set_aggr_stats_for(); + send_alter_index(dbname, base_tbl_name, idx_name, new_idx); + recv_alter_index(); } -void ThriftHiveMetastoreClient::send_set_aggr_stats_for(const SetPartitionsStatsRequest& request) +void ThriftHiveMetastoreClient::send_alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) { int32_t cseqid = 0; - oprot_->writeMessageBegin("set_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_index", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_set_aggr_stats_for_pargs args; - args.request = &request; + ThriftHiveMetastore_alter_index_pargs args; + args.dbname = &dbname; + args.base_tbl_name = &base_tbl_name; + args.idx_name = &idx_name; + args.new_idx = &new_idx; args.write(oprot_); oprot_->writeMessageEnd(); @@ -49031,7 +49272,7 @@ void ThriftHiveMetastoreClient::send_set_aggr_stats_for(const SetPartitionsStats oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_set_aggr_stats_for() +void ThriftHiveMetastoreClient::recv_alter_index() { int32_t rseqid = 0; @@ -49051,52 +49292,41 @@ 
bool ThriftHiveMetastoreClient::recv_set_aggr_stats_for() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("set_aggr_stats_for") != 0) { + if (fname.compare("alter_index") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_set_aggr_stats_for_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_index_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - return _return; - } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - if (result.__isset.o3) { - throw result.o3; - } - if (result.__isset.o4) { - throw result.o4; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_aggr_stats_for failed: unknown result"); + return; } -bool ThriftHiveMetastoreClient::delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) +bool ThriftHiveMetastoreClient::drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) { - send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name); - return recv_delete_partition_column_statistics(); + send_drop_index_by_name(db_name, tbl_name, index_name, deleteData); + return recv_drop_index_by_name(); } -void ThriftHiveMetastoreClient::send_delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) +void ThriftHiveMetastoreClient::send_drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) { int32_t cseqid = 0; - oprot_->writeMessageBegin("delete_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_delete_partition_column_statistics_pargs args; + ThriftHiveMetastore_drop_index_by_name_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.col_name = &col_name; + args.index_name = &index_name; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -49104,7 +49334,7 @@ void ThriftHiveMetastoreClient::send_delete_partition_column_statistics(const st oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_delete_partition_column_statistics() +bool ThriftHiveMetastoreClient::recv_drop_index_by_name() { int32_t rseqid = 0; @@ -49124,13 +49354,13 @@ bool ThriftHiveMetastoreClient::recv_delete_partition_column_statistics() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("delete_partition_column_statistics") != 0) { + if (fname.compare("drop_index_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } bool _return; - ThriftHiveMetastore_delete_partition_column_statistics_presult result; + ThriftHiveMetastore_drop_index_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -49145,30 +49375,24 @@ bool ThriftHiveMetastoreClient::recv_delete_partition_column_statistics() if (result.__isset.o2) { throw result.o2; } - if 
(result.__isset.o3) { - throw result.o3; - } - if (result.__isset.o4) { - throw result.o4; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_partition_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_index_by_name failed: unknown result"); } -bool ThriftHiveMetastoreClient::delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +void ThriftHiveMetastoreClient::get_index_by_name(Index& _return, const std::string& db_name, const std::string& tbl_name, const std::string& index_name) { - send_delete_table_column_statistics(db_name, tbl_name, col_name); - return recv_delete_table_column_statistics(); + send_get_index_by_name(db_name, tbl_name, index_name); + recv_get_index_by_name(_return); } -void ThriftHiveMetastoreClient::send_delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +void ThriftHiveMetastoreClient::send_get_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("delete_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_delete_table_column_statistics_pargs args; + ThriftHiveMetastore_get_index_by_name_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.col_name = &col_name; + args.index_name = &index_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -49176,7 +49400,7 @@ void ThriftHiveMetastoreClient::send_delete_table_column_statistics(const std::s oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_delete_table_column_statistics() +void ThriftHiveMetastoreClient::recv_get_index_by_name(Index& _return) { int32_t rseqid = 0; @@ -49196,20 +49420,20 @@ bool ThriftHiveMetastoreClient::recv_delete_table_column_statistics() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("delete_table_column_statistics") != 0) { + if (fname.compare("get_index_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_delete_table_column_statistics_presult result; + ThriftHiveMetastore_get_index_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - return _return; + // _return pointer has now been filled + return; } if (result.__isset.o1) { throw result.o1; @@ -49217,28 +49441,24 @@ bool ThriftHiveMetastoreClient::recv_delete_table_column_statistics() if (result.__isset.o2) { throw result.o2; } - if (result.__isset.o3) { - throw result.o3; - } - if (result.__isset.o4) { - throw result.o4; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_table_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_by_name failed: unknown result"); } -void ThriftHiveMetastoreClient::create_function(const Function& func) +void ThriftHiveMetastoreClient::get_indexes(std::vector & _return, const std::string& db_name, const 
std::string& tbl_name, const int16_t max_indexes) { - send_create_function(func); - recv_create_function(); + send_get_indexes(db_name, tbl_name, max_indexes); + recv_get_indexes(_return); } -void ThriftHiveMetastoreClient::send_create_function(const Function& func) +void ThriftHiveMetastoreClient::send_get_indexes(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) { int32_t cseqid = 0; - oprot_->writeMessageBegin("create_function", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_indexes", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_function_pargs args; - args.func = &func; + ThriftHiveMetastore_get_indexes_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.max_indexes = &max_indexes; args.write(oprot_); oprot_->writeMessageEnd(); @@ -49246,7 +49466,7 @@ void ThriftHiveMetastoreClient::send_create_function(const Function& func) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_create_function() +void ThriftHiveMetastoreClient::recv_get_indexes(std::vector & _return) { int32_t rseqid = 0; @@ -49266,45 +49486,45 @@ void ThriftHiveMetastoreClient::recv_create_function() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_function") != 0) { + if (fname.compare("get_indexes") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_create_function_presult result; + ThriftHiveMetastore_get_indexes_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - if (result.__isset.o3) { - throw result.o3; - } - if (result.__isset.o4) { - throw result.o4; - } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_indexes failed: unknown result"); } -void ThriftHiveMetastoreClient::drop_function(const std::string& dbName, const std::string& funcName) +void ThriftHiveMetastoreClient::get_index_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) { - send_drop_function(dbName, funcName); - recv_drop_function(); + send_get_index_names(db_name, tbl_name, max_indexes); + recv_get_index_names(_return); } -void ThriftHiveMetastoreClient::send_drop_function(const std::string& dbName, const std::string& funcName) +void ThriftHiveMetastoreClient::send_get_index_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) { int32_t cseqid = 0; - oprot_->writeMessageBegin("drop_function", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_index_names", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_function_pargs args; - args.dbName = &dbName; - args.funcName = &funcName; + ThriftHiveMetastore_get_index_names_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.max_indexes = &max_indexes; args.write(oprot_); oprot_->writeMessageEnd(); @@ -49312,7 +49532,7 @@ void ThriftHiveMetastoreClient::send_drop_function(const std::string& dbName, co oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_drop_function() +void ThriftHiveMetastoreClient::recv_get_index_names(std::vector 
& _return) { int32_t rseqid = 0; @@ -49332,40 +49552,40 @@ void ThriftHiveMetastoreClient::recv_drop_function() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_function") != 0) { + if (fname.compare("get_index_names") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_drop_function_presult result; + ThriftHiveMetastore_get_index_names_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - throw result.o1; + if (result.__isset.success) { + // _return pointer has now been filled + return; } - if (result.__isset.o3) { - throw result.o3; + if (result.__isset.o2) { + throw result.o2; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_names failed: unknown result"); } -void ThriftHiveMetastoreClient::alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) +void ThriftHiveMetastoreClient::get_primary_keys(PrimaryKeysResponse& _return, const PrimaryKeysRequest& request) { - send_alter_function(dbName, funcName, newFunc); - recv_alter_function(); + send_get_primary_keys(request); + recv_get_primary_keys(_return); } -void ThriftHiveMetastoreClient::send_alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) +void ThriftHiveMetastoreClient::send_get_primary_keys(const PrimaryKeysRequest& request) { int32_t cseqid = 0; - oprot_->writeMessageBegin("alter_function", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_function_pargs args; - args.dbName = &dbName; - args.funcName = &funcName; - args.newFunc = &newFunc; + ThriftHiveMetastore_get_primary_keys_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -49373,7 +49593,7 @@ void ThriftHiveMetastoreClient::send_alter_function(const std::string& dbName, c oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_alter_function() +void ThriftHiveMetastoreClient::recv_get_primary_keys(PrimaryKeysResponse& _return) { int32_t rseqid = 0; @@ -49393,39 +49613,43 @@ void ThriftHiveMetastoreClient::recv_alter_function() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_function") != 0) { + if (fname.compare("get_primary_keys") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_alter_function_presult result; + ThriftHiveMetastore_get_primary_keys_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } if (result.__isset.o2) { throw result.o2; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_primary_keys failed: unknown result"); } -void ThriftHiveMetastoreClient::get_functions(std::vector & _return, const std::string& dbName, const std::string& pattern) +void ThriftHiveMetastoreClient::get_foreign_keys(ForeignKeysResponse& _return, const ForeignKeysRequest& request) { - send_get_functions(dbName, pattern); - 
recv_get_functions(_return); + send_get_foreign_keys(request); + recv_get_foreign_keys(_return); } -void ThriftHiveMetastoreClient::send_get_functions(const std::string& dbName, const std::string& pattern) +void ThriftHiveMetastoreClient::send_get_foreign_keys(const ForeignKeysRequest& request) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_functions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_functions_pargs args; - args.dbName = &dbName; - args.pattern = &pattern; + ThriftHiveMetastore_get_foreign_keys_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -49433,7 +49657,7 @@ void ThriftHiveMetastoreClient::send_get_functions(const std::string& dbName, co oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_functions(std::vector & _return) +void ThriftHiveMetastoreClient::recv_get_foreign_keys(ForeignKeysResponse& _return) { int32_t rseqid = 0; @@ -49453,12 +49677,12 @@ void ThriftHiveMetastoreClient::recv_get_functions(std::vector & _r iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_functions") != 0) { + if (fname.compare("get_foreign_keys") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_functions_presult result; + ThriftHiveMetastore_get_foreign_keys_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -49471,23 +49695,25 @@ void ThriftHiveMetastoreClient::recv_get_functions(std::vector & _r if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_functions failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_foreign_keys failed: unknown result"); } -void ThriftHiveMetastoreClient::get_function(Function& _return, const std::string& dbName, const std::string& funcName) +void ThriftHiveMetastoreClient::get_unique_constraints(UniqueConstraintsResponse& _return, const UniqueConstraintsRequest& request) { - send_get_function(dbName, funcName); - recv_get_function(_return); + send_get_unique_constraints(request); + recv_get_unique_constraints(_return); } -void ThriftHiveMetastoreClient::send_get_function(const std::string& dbName, const std::string& funcName) +void ThriftHiveMetastoreClient::send_get_unique_constraints(const UniqueConstraintsRequest& request) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_function", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_unique_constraints", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_function_pargs args; - args.dbName = &dbName; - args.funcName = &funcName; + ThriftHiveMetastore_get_unique_constraints_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -49495,7 +49721,7 @@ void ThriftHiveMetastoreClient::send_get_function(const std::string& dbName, con oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_function(Function& _return) +void ThriftHiveMetastoreClient::recv_get_unique_constraints(UniqueConstraintsResponse& _return) { int32_t rseqid = 0; @@ -49515,12 +49741,12 @@ void ThriftHiveMetastoreClient::recv_get_function(Function& 
_return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_function") != 0) { + if (fname.compare("get_unique_constraints") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_function_presult result; + ThriftHiveMetastore_get_unique_constraints_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -49536,21 +49762,22 @@ void ThriftHiveMetastoreClient::recv_get_function(Function& _return) if (result.__isset.o2) { throw result.o2; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_function failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_unique_constraints failed: unknown result"); } -void ThriftHiveMetastoreClient::get_all_functions(GetAllFunctionsResponse& _return) +void ThriftHiveMetastoreClient::get_not_null_constraints(NotNullConstraintsResponse& _return, const NotNullConstraintsRequest& request) { - send_get_all_functions(); - recv_get_all_functions(_return); + send_get_not_null_constraints(request); + recv_get_not_null_constraints(_return); } -void ThriftHiveMetastoreClient::send_get_all_functions() +void ThriftHiveMetastoreClient::send_get_not_null_constraints(const NotNullConstraintsRequest& request) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_all_functions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_not_null_constraints", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_functions_pargs args; + ThriftHiveMetastore_get_not_null_constraints_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -49558,7 +49785,7 @@ void ThriftHiveMetastoreClient::send_get_all_functions() oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_all_functions(GetAllFunctionsResponse& _return) +void ThriftHiveMetastoreClient::recv_get_not_null_constraints(NotNullConstraintsResponse& _return) { int32_t rseqid = 0; @@ -49578,12 +49805,12 @@ void ThriftHiveMetastoreClient::recv_get_all_functions(GetAllFunctionsResponse& iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_functions") != 0) { + if (fname.compare("get_not_null_constraints") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_all_functions_presult result; + ThriftHiveMetastore_get_not_null_constraints_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -49596,22 +49823,25 @@ void ThriftHiveMetastoreClient::recv_get_all_functions(GetAllFunctionsResponse& if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_functions failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_not_null_constraints failed: unknown result"); } -bool ThriftHiveMetastoreClient::create_role(const Role& role) +bool ThriftHiveMetastoreClient::update_table_column_statistics(const ColumnStatistics& stats_obj) { - send_create_role(role); - return recv_create_role(); + send_update_table_column_statistics(stats_obj); + return 
recv_update_table_column_statistics(); } -void ThriftHiveMetastoreClient::send_create_role(const Role& role) +void ThriftHiveMetastoreClient::send_update_table_column_statistics(const ColumnStatistics& stats_obj) { int32_t cseqid = 0; - oprot_->writeMessageBegin("create_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("update_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_role_pargs args; - args.role = &role; + ThriftHiveMetastore_update_table_column_statistics_pargs args; + args.stats_obj = &stats_obj; args.write(oprot_); oprot_->writeMessageEnd(); @@ -49619,7 +49849,7 @@ void ThriftHiveMetastoreClient::send_create_role(const Role& role) oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_create_role() +bool ThriftHiveMetastoreClient::recv_update_table_column_statistics() { int32_t rseqid = 0; @@ -49639,13 +49869,13 @@ bool ThriftHiveMetastoreClient::recv_create_role() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_role") != 0) { + if (fname.compare("update_table_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } bool _return; - ThriftHiveMetastore_create_role_presult result; + ThriftHiveMetastore_update_table_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -49657,22 +49887,31 @@ bool ThriftHiveMetastoreClient::recv_create_role() if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_role failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_table_column_statistics failed: unknown result"); } -bool ThriftHiveMetastoreClient::drop_role(const std::string& role_name) +bool ThriftHiveMetastoreClient::update_partition_column_statistics(const ColumnStatistics& stats_obj) { - send_drop_role(role_name); - return recv_drop_role(); + send_update_partition_column_statistics(stats_obj); + return recv_update_partition_column_statistics(); } -void ThriftHiveMetastoreClient::send_drop_role(const std::string& role_name) +void ThriftHiveMetastoreClient::send_update_partition_column_statistics(const ColumnStatistics& stats_obj) { int32_t cseqid = 0; - oprot_->writeMessageBegin("drop_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("update_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_role_pargs args; - args.role_name = &role_name; + ThriftHiveMetastore_update_partition_column_statistics_pargs args; + args.stats_obj = &stats_obj; args.write(oprot_); oprot_->writeMessageEnd(); @@ -49680,7 +49919,7 @@ void ThriftHiveMetastoreClient::send_drop_role(const std::string& role_name) oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_drop_role() +bool ThriftHiveMetastoreClient::recv_update_partition_column_statistics() { int32_t rseqid = 0; @@ -49700,13 +49939,13 @@ bool ThriftHiveMetastoreClient::recv_drop_role() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_role") != 0) { + if (fname.compare("update_partition_column_statistics") != 0) { 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } bool _return; - ThriftHiveMetastore_drop_role_presult result; + ThriftHiveMetastore_update_partition_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -49718,21 +49957,33 @@ bool ThriftHiveMetastoreClient::recv_drop_role() if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_role failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_partition_column_statistics failed: unknown result"); } -void ThriftHiveMetastoreClient::get_role_names(std::vector & _return) +void ThriftHiveMetastoreClient::get_table_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& col_name) { - send_get_role_names(); - recv_get_role_names(_return); + send_get_table_column_statistics(db_name, tbl_name, col_name); + recv_get_table_column_statistics(_return); } -void ThriftHiveMetastoreClient::send_get_role_names() +void ThriftHiveMetastoreClient::send_get_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_role_names", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_role_names_pargs args; + ThriftHiveMetastore_get_table_column_statistics_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.col_name = &col_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -49740,7 +49991,7 @@ void ThriftHiveMetastoreClient::send_get_role_names() oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_role_names(std::vector & _return) +void ThriftHiveMetastoreClient::recv_get_table_column_statistics(ColumnStatistics& _return) { int32_t rseqid = 0; @@ -49760,12 +50011,12 @@ void ThriftHiveMetastoreClient::recv_get_role_names(std::vector & _ iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_role_names") != 0) { + if (fname.compare("get_table_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_role_names_presult result; + ThriftHiveMetastore_get_table_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -49778,27 +50029,34 @@ void ThriftHiveMetastoreClient::recv_get_role_names(std::vector & _ if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_role_names failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_column_statistics failed: unknown result"); } -bool ThriftHiveMetastoreClient::grant_role(const std::string& role_name, const std::string& 
principal_name, const PrincipalType::type principal_type, const std::string& grantor, const PrincipalType::type grantorType, const bool grant_option) +void ThriftHiveMetastoreClient::get_partition_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) { - send_grant_role(role_name, principal_name, principal_type, grantor, grantorType, grant_option); - return recv_grant_role(); + send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name); + recv_get_partition_column_statistics(_return); } -void ThriftHiveMetastoreClient::send_grant_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type, const std::string& grantor, const PrincipalType::type grantorType, const bool grant_option) +void ThriftHiveMetastoreClient::send_get_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("grant_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_grant_role_pargs args; - args.role_name = &role_name; - args.principal_name = &principal_name; - args.principal_type = &principal_type; - args.grantor = &grantor; - args.grantorType = &grantorType; - args.grant_option = &grant_option; + ThriftHiveMetastore_get_partition_column_statistics_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_name = &part_name; + args.col_name = &col_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -49806,7 +50064,7 @@ void ThriftHiveMetastoreClient::send_grant_role(const std::string& role_name, co oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_grant_role() +void ThriftHiveMetastoreClient::recv_get_partition_column_statistics(ColumnStatistics& _return) { int32_t rseqid = 0; @@ -49826,42 +50084,49 @@ bool ThriftHiveMetastoreClient::recv_grant_role() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("grant_role") != 0) { + if (fname.compare("get_partition_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_grant_role_presult result; + ThriftHiveMetastore_get_partition_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - return _return; + // _return pointer has now been filled + return; } if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_role failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_column_statistics failed: unknown result"); } -bool ThriftHiveMetastoreClient::revoke_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type) +void ThriftHiveMetastoreClient::get_table_statistics_req(TableStatsResult& _return, const TableStatsRequest& request) { - 
send_revoke_role(role_name, principal_name, principal_type); - return recv_revoke_role(); + send_get_table_statistics_req(request); + recv_get_table_statistics_req(_return); } -void ThriftHiveMetastoreClient::send_revoke_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type) +void ThriftHiveMetastoreClient::send_get_table_statistics_req(const TableStatsRequest& request) { int32_t cseqid = 0; - oprot_->writeMessageBegin("revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_revoke_role_pargs args; - args.role_name = &role_name; - args.principal_name = &principal_name; - args.principal_type = &principal_type; + ThriftHiveMetastore_get_table_statistics_req_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -49869,7 +50134,7 @@ void ThriftHiveMetastoreClient::send_revoke_role(const std::string& role_name, c oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_revoke_role() +void ThriftHiveMetastoreClient::recv_get_table_statistics_req(TableStatsResult& _return) { int32_t rseqid = 0; @@ -49889,41 +50154,43 @@ bool ThriftHiveMetastoreClient::recv_revoke_role() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("revoke_role") != 0) { + if (fname.compare("get_table_statistics_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_revoke_role_presult result; + ThriftHiveMetastore_get_table_statistics_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - return _return; + // _return pointer has now been filled + return; } if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_role failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_statistics_req failed: unknown result"); } -void ThriftHiveMetastoreClient::list_roles(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type) +void ThriftHiveMetastoreClient::get_partitions_statistics_req(PartitionsStatsResult& _return, const PartitionsStatsRequest& request) { - send_list_roles(principal_name, principal_type); - recv_list_roles(_return); + send_get_partitions_statistics_req(request); + recv_get_partitions_statistics_req(_return); } -void ThriftHiveMetastoreClient::send_list_roles(const std::string& principal_name, const PrincipalType::type principal_type) +void ThriftHiveMetastoreClient::send_get_partitions_statistics_req(const PartitionsStatsRequest& request) { int32_t cseqid = 0; - oprot_->writeMessageBegin("list_roles", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_list_roles_pargs args; - args.principal_name = &principal_name; - args.principal_type = &principal_type; + ThriftHiveMetastore_get_partitions_statistics_req_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -49931,7 +50198,7 @@ void 
ThriftHiveMetastoreClient::send_list_roles(const std::string& principal_nam oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_list_roles(std::vector & _return) +void ThriftHiveMetastoreClient::recv_get_partitions_statistics_req(PartitionsStatsResult& _return) { int32_t rseqid = 0; @@ -49951,12 +50218,12 @@ void ThriftHiveMetastoreClient::recv_list_roles(std::vector & _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("list_roles") != 0) { + if (fname.compare("get_partitions_statistics_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_list_roles_presult result; + ThriftHiveMetastore_get_partitions_statistics_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -49969,21 +50236,24 @@ void ThriftHiveMetastoreClient::recv_list_roles(std::vector & _return) if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "list_roles failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_statistics_req failed: unknown result"); } -void ThriftHiveMetastoreClient::grant_revoke_role(GrantRevokeRoleResponse& _return, const GrantRevokeRoleRequest& request) +void ThriftHiveMetastoreClient::get_aggr_stats_for(AggrStats& _return, const PartitionsStatsRequest& request) { - send_grant_revoke_role(request); - recv_grant_revoke_role(_return); + send_get_aggr_stats_for(request); + recv_get_aggr_stats_for(_return); } -void ThriftHiveMetastoreClient::send_grant_revoke_role(const GrantRevokeRoleRequest& request) +void ThriftHiveMetastoreClient::send_get_aggr_stats_for(const PartitionsStatsRequest& request) { int32_t cseqid = 0; - oprot_->writeMessageBegin("grant_revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_grant_revoke_role_pargs args; + ThriftHiveMetastore_get_aggr_stats_for_pargs args; args.request = &request; args.write(oprot_); @@ -49992,7 +50262,7 @@ void ThriftHiveMetastoreClient::send_grant_revoke_role(const GrantRevokeRoleRequ oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_grant_revoke_role(GrantRevokeRoleResponse& _return) +void ThriftHiveMetastoreClient::recv_get_aggr_stats_for(AggrStats& _return) { int32_t rseqid = 0; @@ -50012,12 +50282,12 @@ void ThriftHiveMetastoreClient::recv_grant_revoke_role(GrantRevokeRoleResponse& iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("grant_revoke_role") != 0) { + if (fname.compare("get_aggr_stats_for") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_grant_revoke_role_presult result; + ThriftHiveMetastore_get_aggr_stats_for_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -50030,21 +50300,24 @@ void ThriftHiveMetastoreClient::recv_grant_revoke_role(GrantRevokeRoleResponse& if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_role failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } 
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_aggr_stats_for failed: unknown result"); } -void ThriftHiveMetastoreClient::get_principals_in_role(GetPrincipalsInRoleResponse& _return, const GetPrincipalsInRoleRequest& request) +bool ThriftHiveMetastoreClient::set_aggr_stats_for(const SetPartitionsStatsRequest& request) { - send_get_principals_in_role(request); - recv_get_principals_in_role(_return); + send_set_aggr_stats_for(request); + return recv_set_aggr_stats_for(); } -void ThriftHiveMetastoreClient::send_get_principals_in_role(const GetPrincipalsInRoleRequest& request) +void ThriftHiveMetastoreClient::send_set_aggr_stats_for(const SetPartitionsStatsRequest& request) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_principals_in_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("set_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_principals_in_role_pargs args; + ThriftHiveMetastore_set_aggr_stats_for_pargs args; args.request = &request; args.write(oprot_); @@ -50053,7 +50326,7 @@ void ThriftHiveMetastoreClient::send_get_principals_in_role(const GetPrincipalsI oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_principals_in_role(GetPrincipalsInRoleResponse& _return) +bool ThriftHiveMetastoreClient::recv_set_aggr_stats_for() { int32_t rseqid = 0; @@ -50073,40 +50346,52 @@ void ThriftHiveMetastoreClient::recv_get_principals_in_role(GetPrincipalsInRoleR iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_principals_in_role") != 0) { + if (fname.compare("set_aggr_stats_for") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_principals_in_role_presult result; + bool _return; + ThriftHiveMetastore_set_aggr_stats_for_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_principals_in_role failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_aggr_stats_for failed: unknown result"); } -void ThriftHiveMetastoreClient::get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const GetRoleGrantsForPrincipalRequest& request) +bool ThriftHiveMetastoreClient::delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) { - send_get_role_grants_for_principal(request); - recv_get_role_grants_for_principal(_return); + send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name); + return recv_delete_partition_column_statistics(); } -void ThriftHiveMetastoreClient::send_get_role_grants_for_principal(const GetRoleGrantsForPrincipalRequest& request) +void ThriftHiveMetastoreClient::send_delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) { 
int32_t cseqid = 0; - oprot_->writeMessageBegin("get_role_grants_for_principal", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("delete_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_role_grants_for_principal_pargs args; - args.request = &request; + ThriftHiveMetastore_delete_partition_column_statistics_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_name = &part_name; + args.col_name = &col_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -50114,7 +50399,7 @@ void ThriftHiveMetastoreClient::send_get_role_grants_for_principal(const GetRole oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return) +bool ThriftHiveMetastoreClient::recv_delete_partition_column_statistics() { int32_t rseqid = 0; @@ -50134,42 +50419,51 @@ void ThriftHiveMetastoreClient::recv_get_role_grants_for_principal(GetRoleGrants iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_role_grants_for_principal") != 0) { + if (fname.compare("delete_partition_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_role_grants_for_principal_presult result; + bool _return; + ThriftHiveMetastore_delete_partition_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_role_grants_for_principal failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_partition_column_statistics failed: unknown result"); } -void ThriftHiveMetastoreClient::get_privilege_set(PrincipalPrivilegeSet& _return, const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector & group_names) +bool ThriftHiveMetastoreClient::delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) { - send_get_privilege_set(hiveObject, user_name, group_names); - recv_get_privilege_set(_return); + send_delete_table_column_statistics(db_name, tbl_name, col_name); + return recv_delete_table_column_statistics(); } -void ThriftHiveMetastoreClient::send_get_privilege_set(const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreClient::send_delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_privilege_set", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("delete_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_privilege_set_pargs args; - args.hiveObject = &hiveObject; - args.user_name = &user_name; - args.group_names = &group_names; + ThriftHiveMetastore_delete_table_column_statistics_pargs args; + args.db_name = &db_name; + args.tbl_name = 
&tbl_name; + args.col_name = &col_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -50177,7 +50471,7 @@ void ThriftHiveMetastoreClient::send_get_privilege_set(const HiveObjectRef& hive oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_privilege_set(PrincipalPrivilegeSet& _return) +bool ThriftHiveMetastoreClient::recv_delete_table_column_statistics() { int32_t rseqid = 0; @@ -50197,42 +50491,49 @@ void ThriftHiveMetastoreClient::recv_get_privilege_set(PrincipalPrivilegeSet& _r iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_privilege_set") != 0) { + if (fname.compare("delete_table_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_privilege_set_presult result; + bool _return; + ThriftHiveMetastore_delete_table_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_privilege_set failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_table_column_statistics failed: unknown result"); } -void ThriftHiveMetastoreClient::list_privileges(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) +void ThriftHiveMetastoreClient::create_function(const Function& func) { - send_list_privileges(principal_name, principal_type, hiveObject); - recv_list_privileges(_return); + send_create_function(func); + recv_create_function(); } -void ThriftHiveMetastoreClient::send_list_privileges(const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) +void ThriftHiveMetastoreClient::send_create_function(const Function& func) { int32_t cseqid = 0; - oprot_->writeMessageBegin("list_privileges", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_function", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_list_privileges_pargs args; - args.principal_name = &principal_name; - args.principal_type = &principal_type; - args.hiveObject = &hiveObject; + ThriftHiveMetastore_create_function_pargs args; + args.func = &func; args.write(oprot_); oprot_->writeMessageEnd(); @@ -50240,7 +50541,7 @@ void ThriftHiveMetastoreClient::send_list_privileges(const std::string& principa oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_list_privileges(std::vector & _return) +void ThriftHiveMetastoreClient::recv_create_function() { int32_t rseqid = 0; @@ -50260,40 +50561,45 @@ void ThriftHiveMetastoreClient::recv_list_privileges(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("list_privileges") != 0) { + if (fname.compare("create_function") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_list_privileges_presult result; - result.success = &_return; + 
ThriftHiveMetastore_create_function_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "list_privileges failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + return; } -bool ThriftHiveMetastoreClient::grant_privileges(const PrivilegeBag& privileges) +void ThriftHiveMetastoreClient::drop_function(const std::string& dbName, const std::string& funcName) { - send_grant_privileges(privileges); - return recv_grant_privileges(); + send_drop_function(dbName, funcName); + recv_drop_function(); } -void ThriftHiveMetastoreClient::send_grant_privileges(const PrivilegeBag& privileges) +void ThriftHiveMetastoreClient::send_drop_function(const std::string& dbName, const std::string& funcName) { int32_t cseqid = 0; - oprot_->writeMessageBegin("grant_privileges", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_function", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_grant_privileges_pargs args; - args.privileges = &privileges; + ThriftHiveMetastore_drop_function_pargs args; + args.dbName = &dbName; + args.funcName = &funcName; args.write(oprot_); oprot_->writeMessageEnd(); @@ -50301,7 +50607,7 @@ void ThriftHiveMetastoreClient::send_grant_privileges(const PrivilegeBag& privil oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_grant_privileges() +void ThriftHiveMetastoreClient::recv_drop_function() { int32_t rseqid = 0; @@ -50321,40 +50627,40 @@ bool ThriftHiveMetastoreClient::recv_grant_privileges() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("grant_privileges") != 0) { + if (fname.compare("drop_function") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_grant_privileges_presult result; - result.success = &_return; + ThriftHiveMetastore_drop_function_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - return _return; - } if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_privileges failed: unknown result"); + if (result.__isset.o3) { + throw result.o3; + } + return; } -bool ThriftHiveMetastoreClient::revoke_privileges(const PrivilegeBag& privileges) +void ThriftHiveMetastoreClient::alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) { - send_revoke_privileges(privileges); - return recv_revoke_privileges(); + send_alter_function(dbName, funcName, newFunc); + recv_alter_function(); } -void ThriftHiveMetastoreClient::send_revoke_privileges(const PrivilegeBag& privileges) +void ThriftHiveMetastoreClient::send_alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) { int32_t cseqid = 0; - oprot_->writeMessageBegin("revoke_privileges", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_function", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_revoke_privileges_pargs args; - 
args.privileges = &privileges; + ThriftHiveMetastore_alter_function_pargs args; + args.dbName = &dbName; + args.funcName = &funcName; + args.newFunc = &newFunc; args.write(oprot_); oprot_->writeMessageEnd(); @@ -50362,7 +50668,7 @@ void ThriftHiveMetastoreClient::send_revoke_privileges(const PrivilegeBag& privi oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_revoke_privileges() +void ThriftHiveMetastoreClient::recv_alter_function() { int32_t rseqid = 0; @@ -50382,40 +50688,39 @@ bool ThriftHiveMetastoreClient::recv_revoke_privileges() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("revoke_privileges") != 0) { + if (fname.compare("alter_function") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_revoke_privileges_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_function_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - return _return; - } if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_privileges failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + return; } -void ThriftHiveMetastoreClient::grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const GrantRevokePrivilegeRequest& request) +void ThriftHiveMetastoreClient::get_functions(std::vector & _return, const std::string& dbName, const std::string& pattern) { - send_grant_revoke_privileges(request); - recv_grant_revoke_privileges(_return); + send_get_functions(dbName, pattern); + recv_get_functions(_return); } -void ThriftHiveMetastoreClient::send_grant_revoke_privileges(const GrantRevokePrivilegeRequest& request) +void ThriftHiveMetastoreClient::send_get_functions(const std::string& dbName, const std::string& pattern) { int32_t cseqid = 0; - oprot_->writeMessageBegin("grant_revoke_privileges", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_functions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_grant_revoke_privileges_pargs args; - args.request = &request; + ThriftHiveMetastore_get_functions_pargs args; + args.dbName = &dbName; + args.pattern = &pattern; args.write(oprot_); oprot_->writeMessageEnd(); @@ -50423,7 +50728,7 @@ void ThriftHiveMetastoreClient::send_grant_revoke_privileges(const GrantRevokePr oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_grant_revoke_privileges(GrantRevokePrivilegeResponse& _return) +void ThriftHiveMetastoreClient::recv_get_functions(std::vector & _return) { int32_t rseqid = 0; @@ -50443,12 +50748,12 @@ void ThriftHiveMetastoreClient::recv_grant_revoke_privileges(GrantRevokePrivileg iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("grant_revoke_privileges") != 0) { + if (fname.compare("get_functions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_grant_revoke_privileges_presult result; + ThriftHiveMetastore_get_functions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -50461,23 +50766,23 @@ void ThriftHiveMetastoreClient::recv_grant_revoke_privileges(GrantRevokePrivileg if (result.__isset.o1) { throw result.o1; } - throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_privileges failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_functions failed: unknown result"); } -void ThriftHiveMetastoreClient::set_ugi(std::vector & _return, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreClient::get_function(Function& _return, const std::string& dbName, const std::string& funcName) { - send_set_ugi(user_name, group_names); - recv_set_ugi(_return); + send_get_function(dbName, funcName); + recv_get_function(_return); } -void ThriftHiveMetastoreClient::send_set_ugi(const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreClient::send_get_function(const std::string& dbName, const std::string& funcName) { int32_t cseqid = 0; - oprot_->writeMessageBegin("set_ugi", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_function", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_set_ugi_pargs args; - args.user_name = &user_name; - args.group_names = &group_names; + ThriftHiveMetastore_get_function_pargs args; + args.dbName = &dbName; + args.funcName = &funcName; args.write(oprot_); oprot_->writeMessageEnd(); @@ -50485,7 +50790,7 @@ void ThriftHiveMetastoreClient::send_set_ugi(const std::string& user_name, const oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_set_ugi(std::vector & _return) +void ThriftHiveMetastoreClient::recv_get_function(Function& _return) { int32_t rseqid = 0; @@ -50505,12 +50810,12 @@ void ThriftHiveMetastoreClient::recv_set_ugi(std::vector & _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("set_ugi") != 0) { + if (fname.compare("get_function") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_set_ugi_presult result; + ThriftHiveMetastore_get_function_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -50523,23 +50828,24 @@ void ThriftHiveMetastoreClient::recv_set_ugi(std::vector & _return) if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_ugi failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_function failed: unknown result"); } -void ThriftHiveMetastoreClient::get_delegation_token(std::string& _return, const std::string& token_owner, const std::string& renewer_kerberos_principal_name) +void ThriftHiveMetastoreClient::get_all_functions(GetAllFunctionsResponse& _return) { - send_get_delegation_token(token_owner, renewer_kerberos_principal_name); - recv_get_delegation_token(_return); + send_get_all_functions(); + recv_get_all_functions(_return); } -void ThriftHiveMetastoreClient::send_get_delegation_token(const std::string& token_owner, const std::string& renewer_kerberos_principal_name) +void ThriftHiveMetastoreClient::send_get_all_functions() { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_functions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_delegation_token_pargs args; - 
args.token_owner = &token_owner; - args.renewer_kerberos_principal_name = &renewer_kerberos_principal_name; + ThriftHiveMetastore_get_all_functions_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -50547,7 +50853,7 @@ void ThriftHiveMetastoreClient::send_get_delegation_token(const std::string& tok oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_delegation_token(std::string& _return) +void ThriftHiveMetastoreClient::recv_get_all_functions(GetAllFunctionsResponse& _return) { int32_t rseqid = 0; @@ -50567,12 +50873,12 @@ void ThriftHiveMetastoreClient::recv_get_delegation_token(std::string& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_delegation_token") != 0) { + if (fname.compare("get_all_functions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_delegation_token_presult result; + ThriftHiveMetastore_get_all_functions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -50585,22 +50891,22 @@ void ThriftHiveMetastoreClient::recv_get_delegation_token(std::string& _return) if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_delegation_token failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_functions failed: unknown result"); } -int64_t ThriftHiveMetastoreClient::renew_delegation_token(const std::string& token_str_form) +bool ThriftHiveMetastoreClient::create_role(const Role& role) { - send_renew_delegation_token(token_str_form); - return recv_renew_delegation_token(); + send_create_role(role); + return recv_create_role(); } -void ThriftHiveMetastoreClient::send_renew_delegation_token(const std::string& token_str_form) +void ThriftHiveMetastoreClient::send_create_role(const Role& role) { int32_t cseqid = 0; - oprot_->writeMessageBegin("renew_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_renew_delegation_token_pargs args; - args.token_str_form = &token_str_form; + ThriftHiveMetastore_create_role_pargs args; + args.role = &role; args.write(oprot_); oprot_->writeMessageEnd(); @@ -50608,7 +50914,7 @@ void ThriftHiveMetastoreClient::send_renew_delegation_token(const std::string& t oprot_->getTransport()->flush(); } -int64_t ThriftHiveMetastoreClient::recv_renew_delegation_token() +bool ThriftHiveMetastoreClient::recv_create_role() { int32_t rseqid = 0; @@ -50628,13 +50934,13 @@ int64_t ThriftHiveMetastoreClient::recv_renew_delegation_token() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("renew_delegation_token") != 0) { + if (fname.compare("create_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - int64_t _return; - ThriftHiveMetastore_renew_delegation_token_presult result; + bool _return; + ThriftHiveMetastore_create_role_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -50646,22 +50952,22 @@ int64_t ThriftHiveMetastoreClient::recv_renew_delegation_token() if (result.__isset.o1) { throw result.o1; } - throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "renew_delegation_token failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_role failed: unknown result"); } -void ThriftHiveMetastoreClient::cancel_delegation_token(const std::string& token_str_form) +bool ThriftHiveMetastoreClient::drop_role(const std::string& role_name) { - send_cancel_delegation_token(token_str_form); - recv_cancel_delegation_token(); + send_drop_role(role_name); + return recv_drop_role(); } -void ThriftHiveMetastoreClient::send_cancel_delegation_token(const std::string& token_str_form) +void ThriftHiveMetastoreClient::send_drop_role(const std::string& role_name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("cancel_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_cancel_delegation_token_pargs args; - args.token_str_form = &token_str_form; + ThriftHiveMetastore_drop_role_pargs args; + args.role_name = &role_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -50669,7 +50975,7 @@ void ThriftHiveMetastoreClient::send_cancel_delegation_token(const std::string& oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_cancel_delegation_token() +bool ThriftHiveMetastoreClient::recv_drop_role() { int32_t rseqid = 0; @@ -50689,36 +50995,39 @@ void ThriftHiveMetastoreClient::recv_cancel_delegation_token() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("cancel_delegation_token") != 0) { + if (fname.compare("drop_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_cancel_delegation_token_presult result; + bool _return; + ThriftHiveMetastore_drop_role_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + return _return; + } if (result.__isset.o1) { throw result.o1; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_role failed: unknown result"); } -bool ThriftHiveMetastoreClient::add_token(const std::string& token_identifier, const std::string& delegation_token) +void ThriftHiveMetastoreClient::get_role_names(std::vector & _return) { - send_add_token(token_identifier, delegation_token); - return recv_add_token(); + send_get_role_names(); + recv_get_role_names(_return); } -void ThriftHiveMetastoreClient::send_add_token(const std::string& token_identifier, const std::string& delegation_token) +void ThriftHiveMetastoreClient::send_get_role_names() { int32_t cseqid = 0; - oprot_->writeMessageBegin("add_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_role_names", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_token_pargs args; - args.token_identifier = &token_identifier; - args.delegation_token = &delegation_token; + ThriftHiveMetastore_get_role_names_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -50726,7 +51035,7 @@ void ThriftHiveMetastoreClient::send_add_token(const std::string& token_identifi oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_add_token() +void ThriftHiveMetastoreClient::recv_get_role_names(std::vector & _return) { int32_t rseqid = 0; 
@@ -50746,37 +51055,45 @@ bool ThriftHiveMetastoreClient::recv_add_token() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_token") != 0) { + if (fname.compare("get_role_names") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_add_token_presult result; + ThriftHiveMetastore_get_role_names_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - return _return; + // _return pointer has now been filled + return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_token failed: unknown result"); + if (result.__isset.o1) { + throw result.o1; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_role_names failed: unknown result"); } -bool ThriftHiveMetastoreClient::remove_token(const std::string& token_identifier) +bool ThriftHiveMetastoreClient::grant_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type, const std::string& grantor, const PrincipalType::type grantorType, const bool grant_option) { - send_remove_token(token_identifier); - return recv_remove_token(); + send_grant_role(role_name, principal_name, principal_type, grantor, grantorType, grant_option); + return recv_grant_role(); } -void ThriftHiveMetastoreClient::send_remove_token(const std::string& token_identifier) +void ThriftHiveMetastoreClient::send_grant_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type, const std::string& grantor, const PrincipalType::type grantorType, const bool grant_option) { int32_t cseqid = 0; - oprot_->writeMessageBegin("remove_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_remove_token_pargs args; - args.token_identifier = &token_identifier; + ThriftHiveMetastore_grant_role_pargs args; + args.role_name = &role_name; + args.principal_name = &principal_name; + args.principal_type = &principal_type; + args.grantor = &grantor; + args.grantorType = &grantorType; + args.grant_option = &grant_option; args.write(oprot_); oprot_->writeMessageEnd(); @@ -50784,7 +51101,7 @@ void ThriftHiveMetastoreClient::send_remove_token(const std::string& token_ident oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_remove_token() +bool ThriftHiveMetastoreClient::recv_grant_role() { int32_t rseqid = 0; @@ -50804,13 +51121,13 @@ bool ThriftHiveMetastoreClient::recv_remove_token() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("remove_token") != 0) { + if (fname.compare("grant_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } bool _return; - ThriftHiveMetastore_remove_token_presult result; + ThriftHiveMetastore_grant_role_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -50819,22 +51136,27 @@ bool ThriftHiveMetastoreClient::recv_remove_token() if (result.__isset.success) { return _return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_token failed: unknown result"); + if (result.__isset.o1) 
{ + throw result.o1; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_role failed: unknown result"); } -void ThriftHiveMetastoreClient::get_token(std::string& _return, const std::string& token_identifier) +bool ThriftHiveMetastoreClient::revoke_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type) { - send_get_token(token_identifier); - recv_get_token(_return); + send_revoke_role(role_name, principal_name, principal_type); + return recv_revoke_role(); } -void ThriftHiveMetastoreClient::send_get_token(const std::string& token_identifier) +void ThriftHiveMetastoreClient::send_revoke_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_token_pargs args; - args.token_identifier = &token_identifier; + ThriftHiveMetastore_revoke_role_pargs args; + args.role_name = &role_name; + args.principal_name = &principal_name; + args.principal_type = &principal_type; args.write(oprot_); oprot_->writeMessageEnd(); @@ -50842,7 +51164,7 @@ void ThriftHiveMetastoreClient::send_get_token(const std::string& token_identifi oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_token(std::string& _return) +bool ThriftHiveMetastoreClient::recv_revoke_role() { int32_t rseqid = 0; @@ -50862,36 +51184,41 @@ void ThriftHiveMetastoreClient::recv_get_token(std::string& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_token") != 0) { + if (fname.compare("revoke_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_token_presult result; + bool _return; + ThriftHiveMetastore_revoke_role_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_token failed: unknown result"); + if (result.__isset.o1) { + throw result.o1; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_role failed: unknown result"); } -void ThriftHiveMetastoreClient::get_all_token_identifiers(std::vector & _return) +void ThriftHiveMetastoreClient::list_roles(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type) { - send_get_all_token_identifiers(); - recv_get_all_token_identifiers(_return); + send_list_roles(principal_name, principal_type); + recv_list_roles(_return); } -void ThriftHiveMetastoreClient::send_get_all_token_identifiers() +void ThriftHiveMetastoreClient::send_list_roles(const std::string& principal_name, const PrincipalType::type principal_type) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_all_token_identifiers", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("list_roles", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_token_identifiers_pargs args; + ThriftHiveMetastore_list_roles_pargs args; + args.principal_name = 
&principal_name; + args.principal_type = &principal_type; args.write(oprot_); oprot_->writeMessageEnd(); @@ -50899,7 +51226,7 @@ void ThriftHiveMetastoreClient::send_get_all_token_identifiers() oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_all_token_identifiers(std::vector & _return) +void ThriftHiveMetastoreClient::recv_list_roles(std::vector & _return) { int32_t rseqid = 0; @@ -50919,12 +51246,12 @@ void ThriftHiveMetastoreClient::recv_get_all_token_identifiers(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_token_identifiers") != 0) { + if (fname.compare("list_roles") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_all_token_identifiers_presult result; + ThriftHiveMetastore_list_roles_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -50934,22 +51261,25 @@ void ThriftHiveMetastoreClient::recv_get_all_token_identifiers(std::vectorwriteMessageBegin("add_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_master_key_pargs args; - args.key = &key; + ThriftHiveMetastore_grant_revoke_role_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -50957,7 +51287,7 @@ void ThriftHiveMetastoreClient::send_add_master_key(const std::string& key) oprot_->getTransport()->flush(); } -int32_t ThriftHiveMetastoreClient::recv_add_master_key() +void ThriftHiveMetastoreClient::recv_grant_revoke_role(GrantRevokeRoleResponse& _return) { int32_t rseqid = 0; @@ -50977,41 +51307,40 @@ int32_t ThriftHiveMetastoreClient::recv_add_master_key() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_master_key") != 0) { + if (fname.compare("grant_revoke_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - int32_t _return; - ThriftHiveMetastore_add_master_key_presult result; + ThriftHiveMetastore_grant_revoke_role_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - return _return; + // _return pointer has now been filled + return; } if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_master_key failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_role failed: unknown result"); } -void ThriftHiveMetastoreClient::update_master_key(const int32_t seq_number, const std::string& key) +void ThriftHiveMetastoreClient::get_principals_in_role(GetPrincipalsInRoleResponse& _return, const GetPrincipalsInRoleRequest& request) { - send_update_master_key(seq_number, key); - recv_update_master_key(); + send_get_principals_in_role(request); + recv_get_principals_in_role(_return); } -void ThriftHiveMetastoreClient::send_update_master_key(const int32_t seq_number, const std::string& key) +void ThriftHiveMetastoreClient::send_get_principals_in_role(const GetPrincipalsInRoleRequest& request) { int32_t cseqid = 0; - oprot_->writeMessageBegin("update_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_principals_in_role", 
::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_update_master_key_pargs args; - args.seq_number = &seq_number; - args.key = &key; + ThriftHiveMetastore_get_principals_in_role_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51019,7 +51348,7 @@ void ThriftHiveMetastoreClient::send_update_master_key(const int32_t seq_number, oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_update_master_key() +void ThriftHiveMetastoreClient::recv_get_principals_in_role(GetPrincipalsInRoleResponse& _return) { int32_t rseqid = 0; @@ -51039,38 +51368,40 @@ void ThriftHiveMetastoreClient::recv_update_master_key() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("update_master_key") != 0) { + if (fname.compare("get_principals_in_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_update_master_key_presult result; + ThriftHiveMetastore_get_principals_in_role_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_principals_in_role failed: unknown result"); } -bool ThriftHiveMetastoreClient::remove_master_key(const int32_t key_seq) +void ThriftHiveMetastoreClient::get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const GetRoleGrantsForPrincipalRequest& request) { - send_remove_master_key(key_seq); - return recv_remove_master_key(); + send_get_role_grants_for_principal(request); + recv_get_role_grants_for_principal(_return); } -void ThriftHiveMetastoreClient::send_remove_master_key(const int32_t key_seq) +void ThriftHiveMetastoreClient::send_get_role_grants_for_principal(const GetRoleGrantsForPrincipalRequest& request) { int32_t cseqid = 0; - oprot_->writeMessageBegin("remove_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_role_grants_for_principal", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_remove_master_key_pargs args; - args.key_seq = &key_seq; + ThriftHiveMetastore_get_role_grants_for_principal_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51078,7 +51409,7 @@ void ThriftHiveMetastoreClient::send_remove_master_key(const int32_t key_seq) oprot_->getTransport()->flush(); } -bool ThriftHiveMetastoreClient::recv_remove_master_key() +void ThriftHiveMetastoreClient::recv_get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return) { int32_t rseqid = 0; @@ -51098,36 +51429,42 @@ bool ThriftHiveMetastoreClient::recv_remove_master_key() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("remove_master_key") != 0) { + if (fname.compare("get_role_grants_for_principal") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - bool _return; - ThriftHiveMetastore_remove_master_key_presult result; + ThriftHiveMetastore_get_role_grants_for_principal_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - 
return _return; + // _return pointer has now been filled + return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_master_key failed: unknown result"); + if (result.__isset.o1) { + throw result.o1; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_role_grants_for_principal failed: unknown result"); } -void ThriftHiveMetastoreClient::get_master_keys(std::vector & _return) +void ThriftHiveMetastoreClient::get_privilege_set(PrincipalPrivilegeSet& _return, const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector & group_names) { - send_get_master_keys(); - recv_get_master_keys(_return); + send_get_privilege_set(hiveObject, user_name, group_names); + recv_get_privilege_set(_return); } -void ThriftHiveMetastoreClient::send_get_master_keys() +void ThriftHiveMetastoreClient::send_get_privilege_set(const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector & group_names) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_master_keys", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_privilege_set", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_master_keys_pargs args; + ThriftHiveMetastore_get_privilege_set_pargs args; + args.hiveObject = &hiveObject; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51135,7 +51472,7 @@ void ThriftHiveMetastoreClient::send_get_master_keys() oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_master_keys(std::vector & _return) +void ThriftHiveMetastoreClient::recv_get_privilege_set(PrincipalPrivilegeSet& _return) { int32_t rseqid = 0; @@ -51155,12 +51492,12 @@ void ThriftHiveMetastoreClient::recv_get_master_keys(std::vector & iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_master_keys") != 0) { + if (fname.compare("get_privilege_set") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_master_keys_presult result; + ThriftHiveMetastore_get_privilege_set_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -51170,21 +51507,27 @@ void ThriftHiveMetastoreClient::recv_get_master_keys(std::vector & // _return pointer has now been filled return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_master_keys failed: unknown result"); + if (result.__isset.o1) { + throw result.o1; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_privilege_set failed: unknown result"); } -void ThriftHiveMetastoreClient::get_open_txns(GetOpenTxnsResponse& _return) +void ThriftHiveMetastoreClient::list_privileges(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) { - send_get_open_txns(); - recv_get_open_txns(_return); + send_list_privileges(principal_name, principal_type, hiveObject); + recv_list_privileges(_return); } -void ThriftHiveMetastoreClient::send_get_open_txns() +void ThriftHiveMetastoreClient::send_list_privileges(const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) { int32_t cseqid = 0; - 
oprot_->writeMessageBegin("get_open_txns", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("list_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_open_txns_pargs args; + ThriftHiveMetastore_list_privileges_pargs args; + args.principal_name = &principal_name; + args.principal_type = &principal_type; + args.hiveObject = &hiveObject; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51192,7 +51535,7 @@ void ThriftHiveMetastoreClient::send_get_open_txns() oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_open_txns(GetOpenTxnsResponse& _return) +void ThriftHiveMetastoreClient::recv_list_privileges(std::vector & _return) { int32_t rseqid = 0; @@ -51212,12 +51555,12 @@ void ThriftHiveMetastoreClient::recv_get_open_txns(GetOpenTxnsResponse& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_open_txns") != 0) { + if (fname.compare("list_privileges") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_open_txns_presult result; + ThriftHiveMetastore_list_privileges_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -51227,21 +51570,25 @@ void ThriftHiveMetastoreClient::recv_get_open_txns(GetOpenTxnsResponse& _return) // _return pointer has now been filled return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns failed: unknown result"); + if (result.__isset.o1) { + throw result.o1; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "list_privileges failed: unknown result"); } -void ThriftHiveMetastoreClient::get_open_txns_info(GetOpenTxnsInfoResponse& _return) +bool ThriftHiveMetastoreClient::grant_privileges(const PrivilegeBag& privileges) { - send_get_open_txns_info(); - recv_get_open_txns_info(_return); + send_grant_privileges(privileges); + return recv_grant_privileges(); } -void ThriftHiveMetastoreClient::send_get_open_txns_info() +void ThriftHiveMetastoreClient::send_grant_privileges(const PrivilegeBag& privileges) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_open_txns_info", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_open_txns_info_pargs args; + ThriftHiveMetastore_grant_privileges_pargs args; + args.privileges = &privileges; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51249,7 +51596,7 @@ void ThriftHiveMetastoreClient::send_get_open_txns_info() oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_open_txns_info(GetOpenTxnsInfoResponse& _return) +bool ThriftHiveMetastoreClient::recv_grant_privileges() { int32_t rseqid = 0; @@ -51269,37 +51616,40 @@ void ThriftHiveMetastoreClient::recv_get_open_txns_info(GetOpenTxnsInfoResponse& iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_open_txns_info") != 0) { + if (fname.compare("grant_privileges") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_open_txns_info_presult result; + bool _return; + ThriftHiveMetastore_grant_privileges_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if 
(result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns_info failed: unknown result"); + if (result.__isset.o1) { + throw result.o1; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_privileges failed: unknown result"); } -void ThriftHiveMetastoreClient::open_txns(OpenTxnsResponse& _return, const OpenTxnRequest& rqst) +bool ThriftHiveMetastoreClient::revoke_privileges(const PrivilegeBag& privileges) { - send_open_txns(rqst); - recv_open_txns(_return); + send_revoke_privileges(privileges); + return recv_revoke_privileges(); } -void ThriftHiveMetastoreClient::send_open_txns(const OpenTxnRequest& rqst) +void ThriftHiveMetastoreClient::send_revoke_privileges(const PrivilegeBag& privileges) { int32_t cseqid = 0; - oprot_->writeMessageBegin("open_txns", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("revoke_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_open_txns_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_revoke_privileges_pargs args; + args.privileges = &privileges; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51307,7 +51657,7 @@ void ThriftHiveMetastoreClient::send_open_txns(const OpenTxnRequest& rqst) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_open_txns(OpenTxnsResponse& _return) +bool ThriftHiveMetastoreClient::recv_revoke_privileges() { int32_t rseqid = 0; @@ -51327,37 +51677,40 @@ void ThriftHiveMetastoreClient::recv_open_txns(OpenTxnsResponse& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("open_txns") != 0) { + if (fname.compare("revoke_privileges") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_open_txns_presult result; + bool _return; + ThriftHiveMetastore_revoke_privileges_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "open_txns failed: unknown result"); + if (result.__isset.o1) { + throw result.o1; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_privileges failed: unknown result"); } -void ThriftHiveMetastoreClient::abort_txn(const AbortTxnRequest& rqst) +void ThriftHiveMetastoreClient::grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const GrantRevokePrivilegeRequest& request) { - send_abort_txn(rqst); - recv_abort_txn(); + send_grant_revoke_privileges(request); + recv_grant_revoke_privileges(_return); } -void ThriftHiveMetastoreClient::send_abort_txn(const AbortTxnRequest& rqst) +void ThriftHiveMetastoreClient::send_grant_revoke_privileges(const GrantRevokePrivilegeRequest& request) { int32_t cseqid = 0; - oprot_->writeMessageBegin("abort_txn", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_revoke_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_abort_txn_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_grant_revoke_privileges_pargs args; + args.request = &request; 
args.write(oprot_); oprot_->writeMessageEnd(); @@ -51365,7 +51718,7 @@ void ThriftHiveMetastoreClient::send_abort_txn(const AbortTxnRequest& rqst) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_abort_txn() +void ThriftHiveMetastoreClient::recv_grant_revoke_privileges(GrantRevokePrivilegeResponse& _return) { int32_t rseqid = 0; @@ -51385,35 +51738,41 @@ void ThriftHiveMetastoreClient::recv_abort_txn() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("abort_txn") != 0) { + if (fname.compare("grant_revoke_privileges") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_abort_txn_presult result; + ThriftHiveMetastore_grant_revoke_privileges_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_privileges failed: unknown result"); } -void ThriftHiveMetastoreClient::abort_txns(const AbortTxnsRequest& rqst) +void ThriftHiveMetastoreClient::set_ugi(std::vector & _return, const std::string& user_name, const std::vector & group_names) { - send_abort_txns(rqst); - recv_abort_txns(); + send_set_ugi(user_name, group_names); + recv_set_ugi(_return); } -void ThriftHiveMetastoreClient::send_abort_txns(const AbortTxnsRequest& rqst) +void ThriftHiveMetastoreClient::send_set_ugi(const std::string& user_name, const std::vector & group_names) { int32_t cseqid = 0; - oprot_->writeMessageBegin("abort_txns", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("set_ugi", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_abort_txns_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_set_ugi_pargs args; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51421,7 +51780,7 @@ void ThriftHiveMetastoreClient::send_abort_txns(const AbortTxnsRequest& rqst) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_abort_txns() +void ThriftHiveMetastoreClient::recv_set_ugi(std::vector & _return) { int32_t rseqid = 0; @@ -51441,35 +51800,41 @@ void ThriftHiveMetastoreClient::recv_abort_txns() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("abort_txns") != 0) { + if (fname.compare("set_ugi") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_abort_txns_presult result; + ThriftHiveMetastore_set_ugi_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_ugi failed: unknown result"); } -void ThriftHiveMetastoreClient::commit_txn(const CommitTxnRequest& rqst) +void ThriftHiveMetastoreClient::get_delegation_token(std::string& _return, const std::string& token_owner, const std::string& renewer_kerberos_principal_name) { - send_commit_txn(rqst); - recv_commit_txn(); + 
send_get_delegation_token(token_owner, renewer_kerberos_principal_name); + recv_get_delegation_token(_return); } -void ThriftHiveMetastoreClient::send_commit_txn(const CommitTxnRequest& rqst) +void ThriftHiveMetastoreClient::send_get_delegation_token(const std::string& token_owner, const std::string& renewer_kerberos_principal_name) { int32_t cseqid = 0; - oprot_->writeMessageBegin("commit_txn", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_commit_txn_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_get_delegation_token_pargs args; + args.token_owner = &token_owner; + args.renewer_kerberos_principal_name = &renewer_kerberos_principal_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51477,7 +51842,7 @@ void ThriftHiveMetastoreClient::send_commit_txn(const CommitTxnRequest& rqst) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_commit_txn() +void ThriftHiveMetastoreClient::recv_get_delegation_token(std::string& _return) { int32_t rseqid = 0; @@ -51497,38 +51862,40 @@ void ThriftHiveMetastoreClient::recv_commit_txn() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("commit_txn") != 0) { + if (fname.compare("get_delegation_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_commit_txn_presult result; + ThriftHiveMetastore_get_delegation_token_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + return; + } if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_delegation_token failed: unknown result"); } -void ThriftHiveMetastoreClient::lock(LockResponse& _return, const LockRequest& rqst) +int64_t ThriftHiveMetastoreClient::renew_delegation_token(const std::string& token_str_form) { - send_lock(rqst); - recv_lock(_return); + send_renew_delegation_token(token_str_form); + return recv_renew_delegation_token(); } -void ThriftHiveMetastoreClient::send_lock(const LockRequest& rqst) +void ThriftHiveMetastoreClient::send_renew_delegation_token(const std::string& token_str_form) { int32_t cseqid = 0; - oprot_->writeMessageBegin("lock", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("renew_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_lock_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_renew_delegation_token_pargs args; + args.token_str_form = &token_str_form; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51536,7 +51903,7 @@ void ThriftHiveMetastoreClient::send_lock(const LockRequest& rqst) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_lock(LockResponse& _return) +int64_t ThriftHiveMetastoreClient::recv_renew_delegation_token() { int32_t rseqid = 0; @@ -51556,43 +51923,40 @@ void ThriftHiveMetastoreClient::recv_lock(LockResponse& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("lock") != 0) { + if (fname.compare("renew_delegation_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - 
ThriftHiveMetastore_lock_presult result; + int64_t _return; + ThriftHiveMetastore_renew_delegation_token_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "lock failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "renew_delegation_token failed: unknown result"); } -void ThriftHiveMetastoreClient::check_lock(LockResponse& _return, const CheckLockRequest& rqst) +void ThriftHiveMetastoreClient::cancel_delegation_token(const std::string& token_str_form) { - send_check_lock(rqst); - recv_check_lock(_return); + send_cancel_delegation_token(token_str_form); + recv_cancel_delegation_token(); } -void ThriftHiveMetastoreClient::send_check_lock(const CheckLockRequest& rqst) +void ThriftHiveMetastoreClient::send_cancel_delegation_token(const std::string& token_str_form) { int32_t cseqid = 0; - oprot_->writeMessageBegin("check_lock", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("cancel_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_check_lock_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_cancel_delegation_token_pargs args; + args.token_str_form = &token_str_form; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51600,7 +51964,7 @@ void ThriftHiveMetastoreClient::send_check_lock(const CheckLockRequest& rqst) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_check_lock(LockResponse& _return) +void ThriftHiveMetastoreClient::recv_cancel_delegation_token() { int32_t rseqid = 0; @@ -51620,46 +51984,36 @@ void ThriftHiveMetastoreClient::recv_check_lock(LockResponse& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("check_lock") != 0) { + if (fname.compare("cancel_delegation_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_check_lock_presult result; - result.success = &_return; + ThriftHiveMetastore_cancel_delegation_token_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "check_lock failed: unknown result"); + return; } -void ThriftHiveMetastoreClient::unlock(const UnlockRequest& rqst) +bool ThriftHiveMetastoreClient::add_token(const std::string& token_identifier, const std::string& delegation_token) { - send_unlock(rqst); - recv_unlock(); + send_add_token(token_identifier, delegation_token); + return recv_add_token(); } -void ThriftHiveMetastoreClient::send_unlock(const UnlockRequest& rqst) +void ThriftHiveMetastoreClient::send_add_token(const std::string& token_identifier, const std::string& delegation_token) { int32_t cseqid = 0; - oprot_->writeMessageBegin("unlock", ::apache::thrift::protocol::T_CALL, cseqid); + 
oprot_->writeMessageBegin("add_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_unlock_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_add_token_pargs args; + args.token_identifier = &token_identifier; + args.delegation_token = &delegation_token; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51667,7 +52021,7 @@ void ThriftHiveMetastoreClient::send_unlock(const UnlockRequest& rqst) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_unlock() +bool ThriftHiveMetastoreClient::recv_add_token() { int32_t rseqid = 0; @@ -51687,38 +52041,37 @@ void ThriftHiveMetastoreClient::recv_unlock() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("unlock") != 0) { + if (fname.compare("add_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_unlock_presult result; + bool _return; + ThriftHiveMetastore_add_token_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; + if (result.__isset.success) { + return _return; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_token failed: unknown result"); } -void ThriftHiveMetastoreClient::show_locks(ShowLocksResponse& _return, const ShowLocksRequest& rqst) +bool ThriftHiveMetastoreClient::remove_token(const std::string& token_identifier) { - send_show_locks(rqst); - recv_show_locks(_return); + send_remove_token(token_identifier); + return recv_remove_token(); } -void ThriftHiveMetastoreClient::send_show_locks(const ShowLocksRequest& rqst) +void ThriftHiveMetastoreClient::send_remove_token(const std::string& token_identifier) { int32_t cseqid = 0; - oprot_->writeMessageBegin("show_locks", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("remove_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_show_locks_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_remove_token_pargs args; + args.token_identifier = &token_identifier; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51726,7 +52079,7 @@ void ThriftHiveMetastoreClient::send_show_locks(const ShowLocksRequest& rqst) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_show_locks(ShowLocksResponse& _return) +bool ThriftHiveMetastoreClient::recv_remove_token() { int32_t rseqid = 0; @@ -51746,37 +52099,37 @@ void ThriftHiveMetastoreClient::recv_show_locks(ShowLocksResponse& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("show_locks") != 0) { + if (fname.compare("remove_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_show_locks_presult result; + bool _return; + ThriftHiveMetastore_remove_token_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_locks failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_token 
failed: unknown result"); } -void ThriftHiveMetastoreClient::heartbeat(const HeartbeatRequest& ids) +void ThriftHiveMetastoreClient::get_token(std::string& _return, const std::string& token_identifier) { - send_heartbeat(ids); - recv_heartbeat(); + send_get_token(token_identifier); + recv_get_token(_return); } -void ThriftHiveMetastoreClient::send_heartbeat(const HeartbeatRequest& ids) +void ThriftHiveMetastoreClient::send_get_token(const std::string& token_identifier) { int32_t cseqid = 0; - oprot_->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_heartbeat_pargs args; - args.ids = &ids; + ThriftHiveMetastore_get_token_pargs args; + args.token_identifier = &token_identifier; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51784,7 +52137,7 @@ void ThriftHiveMetastoreClient::send_heartbeat(const HeartbeatRequest& ids) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_heartbeat() +void ThriftHiveMetastoreClient::recv_get_token(std::string& _return) { int32_t rseqid = 0; @@ -51804,41 +52157,36 @@ void ThriftHiveMetastoreClient::recv_heartbeat() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("heartbeat") != 0) { + if (fname.compare("get_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_heartbeat_presult result; + ThriftHiveMetastore_get_token_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; + if (result.__isset.success) { + // _return pointer has now been filled + return; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_token failed: unknown result"); } -void ThriftHiveMetastoreClient::heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const HeartbeatTxnRangeRequest& txns) +void ThriftHiveMetastoreClient::get_all_token_identifiers(std::vector & _return) { - send_heartbeat_txn_range(txns); - recv_heartbeat_txn_range(_return); + send_get_all_token_identifiers(); + recv_get_all_token_identifiers(_return); } -void ThriftHiveMetastoreClient::send_heartbeat_txn_range(const HeartbeatTxnRangeRequest& txns) +void ThriftHiveMetastoreClient::send_get_all_token_identifiers() { int32_t cseqid = 0; - oprot_->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_token_identifiers", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_heartbeat_txn_range_pargs args; - args.txns = &txns; + ThriftHiveMetastore_get_all_token_identifiers_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51846,7 +52194,7 @@ void ThriftHiveMetastoreClient::send_heartbeat_txn_range(const HeartbeatTxnRange oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_heartbeat_txn_range(HeartbeatTxnRangeResponse& _return) +void ThriftHiveMetastoreClient::recv_get_all_token_identifiers(std::vector & _return) { int32_t rseqid = 0; @@ -51866,12 +52214,12 @@ void ThriftHiveMetastoreClient::recv_heartbeat_txn_range(HeartbeatTxnRangeRespon iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("heartbeat_txn_range") 
!= 0) { + if (fname.compare("get_all_token_identifiers") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_heartbeat_txn_range_presult result; + ThriftHiveMetastore_get_all_token_identifiers_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -51881,22 +52229,22 @@ void ThriftHiveMetastoreClient::recv_heartbeat_txn_range(HeartbeatTxnRangeRespon // _return pointer has now been filled return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "heartbeat_txn_range failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_token_identifiers failed: unknown result"); } -void ThriftHiveMetastoreClient::compact(const CompactionRequest& rqst) +int32_t ThriftHiveMetastoreClient::add_master_key(const std::string& key) { - send_compact(rqst); - recv_compact(); + send_add_master_key(key); + return recv_add_master_key(); } -void ThriftHiveMetastoreClient::send_compact(const CompactionRequest& rqst) +void ThriftHiveMetastoreClient::send_add_master_key(const std::string& key) { int32_t cseqid = 0; - oprot_->writeMessageBegin("compact", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_master_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_compact_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_add_master_key_pargs args; + args.key = &key; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51904,7 +52252,7 @@ void ThriftHiveMetastoreClient::send_compact(const CompactionRequest& rqst) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_compact() +int32_t ThriftHiveMetastoreClient::recv_add_master_key() { int32_t rseqid = 0; @@ -51924,32 +52272,41 @@ void ThriftHiveMetastoreClient::recv_compact() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("compact") != 0) { + if (fname.compare("add_master_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_compact_presult result; + int32_t _return; + ThriftHiveMetastore_add_master_key_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - return; + if (result.__isset.success) { + return _return; + } + if (result.__isset.o1) { + throw result.o1; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_master_key failed: unknown result"); } -void ThriftHiveMetastoreClient::compact2(CompactionResponse& _return, const CompactionRequest& rqst) +void ThriftHiveMetastoreClient::update_master_key(const int32_t seq_number, const std::string& key) { - send_compact2(rqst); - recv_compact2(_return); + send_update_master_key(seq_number, key); + recv_update_master_key(); } -void ThriftHiveMetastoreClient::send_compact2(const CompactionRequest& rqst) +void ThriftHiveMetastoreClient::send_update_master_key(const int32_t seq_number, const std::string& key) { int32_t cseqid = 0; - oprot_->writeMessageBegin("compact2", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("update_master_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_compact2_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_update_master_key_pargs args; + 
args.seq_number = &seq_number; + args.key = &key; args.write(oprot_); oprot_->writeMessageEnd(); @@ -51957,7 +52314,7 @@ void ThriftHiveMetastoreClient::send_compact2(const CompactionRequest& rqst) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_compact2(CompactionResponse& _return) +void ThriftHiveMetastoreClient::recv_update_master_key() { int32_t rseqid = 0; @@ -51977,37 +52334,38 @@ void ThriftHiveMetastoreClient::recv_compact2(CompactionResponse& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("compact2") != 0) { + if (fname.compare("update_master_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_compact2_presult result; - result.success = &_return; + ThriftHiveMetastore_update_master_key_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; + if (result.__isset.o1) { + throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "compact2 failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + return; } -void ThriftHiveMetastoreClient::show_compact(ShowCompactResponse& _return, const ShowCompactRequest& rqst) +bool ThriftHiveMetastoreClient::remove_master_key(const int32_t key_seq) { - send_show_compact(rqst); - recv_show_compact(_return); + send_remove_master_key(key_seq); + return recv_remove_master_key(); } -void ThriftHiveMetastoreClient::send_show_compact(const ShowCompactRequest& rqst) +void ThriftHiveMetastoreClient::send_remove_master_key(const int32_t key_seq) { int32_t cseqid = 0; - oprot_->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("remove_master_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_show_compact_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_remove_master_key_pargs args; + args.key_seq = &key_seq; args.write(oprot_); oprot_->writeMessageEnd(); @@ -52015,7 +52373,7 @@ void ThriftHiveMetastoreClient::send_show_compact(const ShowCompactRequest& rqst oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_show_compact(ShowCompactResponse& _return) +bool ThriftHiveMetastoreClient::recv_remove_master_key() { int32_t rseqid = 0; @@ -52035,37 +52393,36 @@ void ThriftHiveMetastoreClient::recv_show_compact(ShowCompactResponse& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("show_compact") != 0) { + if (fname.compare("remove_master_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_show_compact_presult result; + bool _return; + ThriftHiveMetastore_remove_master_key_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - return; + return _return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_compact failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_master_key failed: unknown result"); } -void ThriftHiveMetastoreClient::add_dynamic_partitions(const 
AddDynamicPartitions& rqst) +void ThriftHiveMetastoreClient::get_master_keys(std::vector & _return) { - send_add_dynamic_partitions(rqst); - recv_add_dynamic_partitions(); + send_get_master_keys(); + recv_get_master_keys(_return); } -void ThriftHiveMetastoreClient::send_add_dynamic_partitions(const AddDynamicPartitions& rqst) +void ThriftHiveMetastoreClient::send_get_master_keys() { int32_t cseqid = 0; - oprot_->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_master_keys", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_dynamic_partitions_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_get_master_keys_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -52073,7 +52430,7 @@ void ThriftHiveMetastoreClient::send_add_dynamic_partitions(const AddDynamicPart oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_add_dynamic_partitions() +void ThriftHiveMetastoreClient::recv_get_master_keys(std::vector & _return) { int32_t rseqid = 0; @@ -52093,38 +52450,36 @@ void ThriftHiveMetastoreClient::recv_add_dynamic_partitions() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_dynamic_partitions") != 0) { + if (fname.compare("get_master_keys") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_add_dynamic_partitions_presult result; + ThriftHiveMetastore_get_master_keys_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; + if (result.__isset.success) { + // _return pointer has now been filled + return; } - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_master_keys failed: unknown result"); } -void ThriftHiveMetastoreClient::get_next_notification(NotificationEventResponse& _return, const NotificationEventRequest& rqst) +void ThriftHiveMetastoreClient::get_open_txns(GetOpenTxnsResponse& _return) { - send_get_next_notification(rqst); - recv_get_next_notification(_return); + send_get_open_txns(); + recv_get_open_txns(_return); } -void ThriftHiveMetastoreClient::send_get_next_notification(const NotificationEventRequest& rqst) +void ThriftHiveMetastoreClient::send_get_open_txns() { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_open_txns", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_next_notification_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_get_open_txns_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -52132,7 +52487,7 @@ void ThriftHiveMetastoreClient::send_get_next_notification(const NotificationEve oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_next_notification(NotificationEventResponse& _return) +void ThriftHiveMetastoreClient::recv_get_open_txns(GetOpenTxnsResponse& _return) { int32_t rseqid = 0; @@ -52152,12 +52507,12 @@ void ThriftHiveMetastoreClient::recv_get_next_notification(NotificationEventResp iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_next_notification") != 0) { + if (fname.compare("get_open_txns") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); 
iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_next_notification_presult result; + ThriftHiveMetastore_get_open_txns_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -52167,21 +52522,21 @@ void ThriftHiveMetastoreClient::recv_get_next_notification(NotificationEventResp // _return pointer has now been filled return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_next_notification failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns failed: unknown result"); } -void ThriftHiveMetastoreClient::get_current_notificationEventId(CurrentNotificationEventId& _return) +void ThriftHiveMetastoreClient::get_open_txns_info(GetOpenTxnsInfoResponse& _return) { - send_get_current_notificationEventId(); - recv_get_current_notificationEventId(_return); + send_get_open_txns_info(); + recv_get_open_txns_info(_return); } -void ThriftHiveMetastoreClient::send_get_current_notificationEventId() +void ThriftHiveMetastoreClient::send_get_open_txns_info() { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_open_txns_info", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_current_notificationEventId_pargs args; + ThriftHiveMetastore_get_open_txns_info_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -52189,7 +52544,7 @@ void ThriftHiveMetastoreClient::send_get_current_notificationEventId() oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_current_notificationEventId(CurrentNotificationEventId& _return) +void ThriftHiveMetastoreClient::recv_get_open_txns_info(GetOpenTxnsInfoResponse& _return) { int32_t rseqid = 0; @@ -52209,12 +52564,12 @@ void ThriftHiveMetastoreClient::recv_get_current_notificationEventId(CurrentNoti iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_current_notificationEventId") != 0) { + if (fname.compare("get_open_txns_info") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_current_notificationEventId_presult result; + ThriftHiveMetastore_get_open_txns_info_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -52224,21 +52579,21 @@ void ThriftHiveMetastoreClient::recv_get_current_notificationEventId(CurrentNoti // _return pointer has now been filled return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_current_notificationEventId failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns_info failed: unknown result"); } -void ThriftHiveMetastoreClient::get_notification_events_count(NotificationEventsCountResponse& _return, const NotificationEventsCountRequest& rqst) +void ThriftHiveMetastoreClient::open_txns(OpenTxnsResponse& _return, const OpenTxnRequest& rqst) { - send_get_notification_events_count(rqst); - recv_get_notification_events_count(_return); + send_open_txns(rqst); + recv_open_txns(_return); } -void ThriftHiveMetastoreClient::send_get_notification_events_count(const NotificationEventsCountRequest& rqst) +void 
ThriftHiveMetastoreClient::send_open_txns(const OpenTxnRequest& rqst) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_notification_events_count", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("open_txns", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_notification_events_count_pargs args; + ThriftHiveMetastore_open_txns_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -52247,7 +52602,7 @@ void ThriftHiveMetastoreClient::send_get_notification_events_count(const Notific oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_notification_events_count(NotificationEventsCountResponse& _return) +void ThriftHiveMetastoreClient::recv_open_txns(OpenTxnsResponse& _return) { int32_t rseqid = 0; @@ -52267,12 +52622,12 @@ void ThriftHiveMetastoreClient::recv_get_notification_events_count(NotificationE iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_notification_events_count") != 0) { + if (fname.compare("open_txns") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_notification_events_count_presult result; + ThriftHiveMetastore_open_txns_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -52282,21 +52637,21 @@ void ThriftHiveMetastoreClient::recv_get_notification_events_count(NotificationE // _return pointer has now been filled return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_notification_events_count failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "open_txns failed: unknown result"); } -void ThriftHiveMetastoreClient::fire_listener_event(FireEventResponse& _return, const FireEventRequest& rqst) +void ThriftHiveMetastoreClient::abort_txn(const AbortTxnRequest& rqst) { - send_fire_listener_event(rqst); - recv_fire_listener_event(_return); + send_abort_txn(rqst); + recv_abort_txn(); } -void ThriftHiveMetastoreClient::send_fire_listener_event(const FireEventRequest& rqst) +void ThriftHiveMetastoreClient::send_abort_txn(const AbortTxnRequest& rqst) { int32_t cseqid = 0; - oprot_->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("abort_txn", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_fire_listener_event_pargs args; + ThriftHiveMetastore_abort_txn_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -52305,7 +52660,7 @@ void ThriftHiveMetastoreClient::send_fire_listener_event(const FireEventRequest& oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_fire_listener_event(FireEventResponse& _return) +void ThriftHiveMetastoreClient::recv_abort_txn() { int32_t rseqid = 0; @@ -52325,36 +52680,35 @@ void ThriftHiveMetastoreClient::recv_fire_listener_event(FireEventResponse& _ret iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("fire_listener_event") != 0) { + if (fname.compare("abort_txn") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_fire_listener_event_presult result; - result.success = &_return; + ThriftHiveMetastore_abort_txn_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // 
_return pointer has now been filled - return; + if (result.__isset.o1) { + throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fire_listener_event failed: unknown result"); + return; } -void ThriftHiveMetastoreClient::flushCache() +void ThriftHiveMetastoreClient::abort_txns(const AbortTxnsRequest& rqst) { - send_flushCache(); - recv_flushCache(); + send_abort_txns(rqst); + recv_abort_txns(); } -void ThriftHiveMetastoreClient::send_flushCache() +void ThriftHiveMetastoreClient::send_abort_txns(const AbortTxnsRequest& rqst) { int32_t cseqid = 0; - oprot_->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("abort_txns", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_flushCache_pargs args; + ThriftHiveMetastore_abort_txns_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -52362,7 +52716,7 @@ void ThriftHiveMetastoreClient::send_flushCache() oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_flushCache() +void ThriftHiveMetastoreClient::recv_abort_txns() { int32_t rseqid = 0; @@ -52382,32 +52736,35 @@ void ThriftHiveMetastoreClient::recv_flushCache() iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("flushCache") != 0) { + if (fname.compare("abort_txns") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_flushCache_presult result; + ThriftHiveMetastore_abort_txns_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.o1) { + throw result.o1; + } return; } -void ThriftHiveMetastoreClient::cm_recycle(CmRecycleResponse& _return, const CmRecycleRequest& request) +void ThriftHiveMetastoreClient::commit_txn(const CommitTxnRequest& rqst) { - send_cm_recycle(request); - recv_cm_recycle(_return); + send_commit_txn(rqst); + recv_commit_txn(); } -void ThriftHiveMetastoreClient::send_cm_recycle(const CmRecycleRequest& request) +void ThriftHiveMetastoreClient::send_commit_txn(const CommitTxnRequest& rqst) { int32_t cseqid = 0; - oprot_->writeMessageBegin("cm_recycle", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("commit_txn", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_cm_recycle_pargs args; - args.request = &request; + ThriftHiveMetastore_commit_txn_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -52415,7 +52772,7 @@ void ThriftHiveMetastoreClient::send_cm_recycle(const CmRecycleRequest& request) oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_cm_recycle(CmRecycleResponse& _return) +void ThriftHiveMetastoreClient::recv_commit_txn() { int32_t rseqid = 0; @@ -52435,40 +52792,38 @@ void ThriftHiveMetastoreClient::recv_cm_recycle(CmRecycleResponse& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("cm_recycle") != 0) { + if (fname.compare("commit_txn") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_cm_recycle_presult result; - result.success = &_return; + ThriftHiveMetastore_commit_txn_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if 
(result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cm_recycle failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + return; } -void ThriftHiveMetastoreClient::get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) +void ThriftHiveMetastoreClient::lock(LockResponse& _return, const LockRequest& rqst) { - send_get_file_metadata_by_expr(req); - recv_get_file_metadata_by_expr(_return); + send_lock(rqst); + recv_lock(_return); } -void ThriftHiveMetastoreClient::send_get_file_metadata_by_expr(const GetFileMetadataByExprRequest& req) +void ThriftHiveMetastoreClient::send_lock(const LockRequest& rqst) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("lock", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_file_metadata_by_expr_pargs args; - args.req = &req; + ThriftHiveMetastore_lock_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -52476,7 +52831,7 @@ void ThriftHiveMetastoreClient::send_get_file_metadata_by_expr(const GetFileMeta oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_file_metadata_by_expr(GetFileMetadataByExprResult& _return) +void ThriftHiveMetastoreClient::recv_lock(LockResponse& _return) { int32_t rseqid = 0; @@ -52496,12 +52851,12 @@ void ThriftHiveMetastoreClient::recv_get_file_metadata_by_expr(GetFileMetadataBy iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_file_metadata_by_expr") != 0) { + if (fname.compare("lock") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_file_metadata_by_expr_presult result; + ThriftHiveMetastore_lock_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -52511,22 +52866,28 @@ void ThriftHiveMetastoreClient::recv_get_file_metadata_by_expr(GetFileMetadataBy // _return pointer has now been filled return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata_by_expr failed: unknown result"); + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "lock failed: unknown result"); } -void ThriftHiveMetastoreClient::get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) +void ThriftHiveMetastoreClient::check_lock(LockResponse& _return, const CheckLockRequest& rqst) { - send_get_file_metadata(req); - recv_get_file_metadata(_return); + send_check_lock(rqst); + recv_check_lock(_return); } -void ThriftHiveMetastoreClient::send_get_file_metadata(const GetFileMetadataRequest& req) +void ThriftHiveMetastoreClient::send_check_lock(const CheckLockRequest& rqst) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("check_lock", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_file_metadata_pargs args; - args.req = &req; + ThriftHiveMetastore_check_lock_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -52534,7 +52895,7 @@ void 
ThriftHiveMetastoreClient::send_get_file_metadata(const GetFileMetadataRequ oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_file_metadata(GetFileMetadataResult& _return) +void ThriftHiveMetastoreClient::recv_check_lock(LockResponse& _return) { int32_t rseqid = 0; @@ -52554,12 +52915,12 @@ void ThriftHiveMetastoreClient::recv_get_file_metadata(GetFileMetadataResult& _r iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_file_metadata") != 0) { + if (fname.compare("check_lock") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_file_metadata_presult result; + ThriftHiveMetastore_check_lock_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -52569,22 +52930,31 @@ void ThriftHiveMetastoreClient::recv_get_file_metadata(GetFileMetadataResult& _r // _return pointer has now been filled return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata failed: unknown result"); + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "check_lock failed: unknown result"); } -void ThriftHiveMetastoreClient::put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) +void ThriftHiveMetastoreClient::unlock(const UnlockRequest& rqst) { - send_put_file_metadata(req); - recv_put_file_metadata(_return); + send_unlock(rqst); + recv_unlock(); } -void ThriftHiveMetastoreClient::send_put_file_metadata(const PutFileMetadataRequest& req) +void ThriftHiveMetastoreClient::send_unlock(const UnlockRequest& rqst) { int32_t cseqid = 0; - oprot_->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("unlock", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_put_file_metadata_pargs args; - args.req = &req; + ThriftHiveMetastore_unlock_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -52592,7 +52962,7 @@ void ThriftHiveMetastoreClient::send_put_file_metadata(const PutFileMetadataRequ oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_put_file_metadata(PutFileMetadataResult& _return) +void ThriftHiveMetastoreClient::recv_unlock() { int32_t rseqid = 0; @@ -52612,37 +52982,38 @@ void ThriftHiveMetastoreClient::recv_put_file_metadata(PutFileMetadataResult& _r iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("put_file_metadata") != 0) { + if (fname.compare("unlock") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_put_file_metadata_presult result; - result.success = &_return; + ThriftHiveMetastore_unlock_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; + if (result.__isset.o1) { + throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "put_file_metadata failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + return; } -void 
ThriftHiveMetastoreClient::clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) +void ThriftHiveMetastoreClient::show_locks(ShowLocksResponse& _return, const ShowLocksRequest& rqst) { - send_clear_file_metadata(req); - recv_clear_file_metadata(_return); + send_show_locks(rqst); + recv_show_locks(_return); } -void ThriftHiveMetastoreClient::send_clear_file_metadata(const ClearFileMetadataRequest& req) +void ThriftHiveMetastoreClient::send_show_locks(const ShowLocksRequest& rqst) { int32_t cseqid = 0; - oprot_->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("show_locks", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_clear_file_metadata_pargs args; - args.req = &req; + ThriftHiveMetastore_show_locks_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -52650,7 +53021,7 @@ void ThriftHiveMetastoreClient::send_clear_file_metadata(const ClearFileMetadata oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_clear_file_metadata(ClearFileMetadataResult& _return) +void ThriftHiveMetastoreClient::recv_show_locks(ShowLocksResponse& _return) { int32_t rseqid = 0; @@ -52670,12 +53041,12 @@ void ThriftHiveMetastoreClient::recv_clear_file_metadata(ClearFileMetadataResult iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("clear_file_metadata") != 0) { + if (fname.compare("show_locks") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_clear_file_metadata_presult result; + ThriftHiveMetastore_show_locks_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -52685,22 +53056,22 @@ void ThriftHiveMetastoreClient::recv_clear_file_metadata(ClearFileMetadataResult // _return pointer has now been filled return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "clear_file_metadata failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_locks failed: unknown result"); } -void ThriftHiveMetastoreClient::cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) +void ThriftHiveMetastoreClient::heartbeat(const HeartbeatRequest& ids) { - send_cache_file_metadata(req); - recv_cache_file_metadata(_return); + send_heartbeat(ids); + recv_heartbeat(); } -void ThriftHiveMetastoreClient::send_cache_file_metadata(const CacheFileMetadataRequest& req) +void ThriftHiveMetastoreClient::send_heartbeat(const HeartbeatRequest& ids) { int32_t cseqid = 0; - oprot_->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_cache_file_metadata_pargs args; - args.req = &req; + ThriftHiveMetastore_heartbeat_pargs args; + args.ids = &ids; args.write(oprot_); oprot_->writeMessageEnd(); @@ -52708,7 +53079,7 @@ void ThriftHiveMetastoreClient::send_cache_file_metadata(const CacheFileMetadata oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_cache_file_metadata(CacheFileMetadataResult& _return) +void ThriftHiveMetastoreClient::recv_heartbeat() { int32_t rseqid = 0; @@ -52728,36 +53099,41 @@ void ThriftHiveMetastoreClient::recv_cache_file_metadata(CacheFileMetadataResult 
iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("cache_file_metadata") != 0) { + if (fname.compare("heartbeat") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_cache_file_metadata_presult result; - result.success = &_return; + ThriftHiveMetastore_heartbeat_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; + if (result.__isset.o1) { + throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cache_file_metadata failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + return; } -void ThriftHiveMetastoreClient::get_metastore_db_uuid(std::string& _return) +void ThriftHiveMetastoreClient::heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const HeartbeatTxnRangeRequest& txns) { - send_get_metastore_db_uuid(); - recv_get_metastore_db_uuid(_return); + send_heartbeat_txn_range(txns); + recv_heartbeat_txn_range(_return); } -void ThriftHiveMetastoreClient::send_get_metastore_db_uuid() +void ThriftHiveMetastoreClient::send_heartbeat_txn_range(const HeartbeatTxnRangeRequest& txns) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_metastore_db_uuid", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_metastore_db_uuid_pargs args; + ThriftHiveMetastore_heartbeat_txn_range_pargs args; + args.txns = &txns; args.write(oprot_); oprot_->writeMessageEnd(); @@ -52765,7 +53141,7 @@ void ThriftHiveMetastoreClient::send_get_metastore_db_uuid() oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_metastore_db_uuid(std::string& _return) +void ThriftHiveMetastoreClient::recv_heartbeat_txn_range(HeartbeatTxnRangeResponse& _return) { int32_t rseqid = 0; @@ -52785,12 +53161,12 @@ void ThriftHiveMetastoreClient::recv_get_metastore_db_uuid(std::string& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_metastore_db_uuid") != 0) { + if (fname.compare("heartbeat_txn_range") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_metastore_db_uuid_presult result; + ThriftHiveMetastore_heartbeat_txn_range_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -52800,25 +53176,22 @@ void ThriftHiveMetastoreClient::recv_get_metastore_db_uuid(std::string& _return) // _return pointer has now been filled return; } - if (result.__isset.o1) { - throw result.o1; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_metastore_db_uuid failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "heartbeat_txn_range failed: unknown result"); } -void ThriftHiveMetastoreClient::create_resource_plan(WMCreateResourcePlanResponse& _return, const WMCreateResourcePlanRequest& request) +void ThriftHiveMetastoreClient::compact(const CompactionRequest& rqst) { - send_create_resource_plan(request); - recv_create_resource_plan(_return); + send_compact(rqst); + recv_compact(); } -void 
ThriftHiveMetastoreClient::send_create_resource_plan(const WMCreateResourcePlanRequest& request) +void ThriftHiveMetastoreClient::send_compact(const CompactionRequest& rqst) { int32_t cseqid = 0; - oprot_->writeMessageBegin("create_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("compact", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_resource_plan_pargs args; - args.request = &request; + ThriftHiveMetastore_compact_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -52826,7 +53199,7 @@ void ThriftHiveMetastoreClient::send_create_resource_plan(const WMCreateResource oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_create_resource_plan(WMCreateResourcePlanResponse& _return) +void ThriftHiveMetastoreClient::recv_compact() { int32_t rseqid = 0; @@ -52846,46 +53219,32 @@ void ThriftHiveMetastoreClient::recv_create_resource_plan(WMCreateResourcePlanRe iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_resource_plan") != 0) { + if (fname.compare("compact") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_create_resource_plan_presult result; - result.success = &_return; + ThriftHiveMetastore_compact_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_resource_plan failed: unknown result"); + return; } -void ThriftHiveMetastoreClient::get_resource_plan(WMGetResourcePlanResponse& _return, const WMGetResourcePlanRequest& request) +void ThriftHiveMetastoreClient::compact2(CompactionResponse& _return, const CompactionRequest& rqst) { - send_get_resource_plan(request); - recv_get_resource_plan(_return); + send_compact2(rqst); + recv_compact2(_return); } -void ThriftHiveMetastoreClient::send_get_resource_plan(const WMGetResourcePlanRequest& request) +void ThriftHiveMetastoreClient::send_compact2(const CompactionRequest& rqst) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("compact2", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_resource_plan_pargs args; - args.request = &request; + ThriftHiveMetastore_compact2_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -52893,7 +53252,7 @@ void ThriftHiveMetastoreClient::send_get_resource_plan(const WMGetResourcePlanRe oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_resource_plan(WMGetResourcePlanResponse& _return) +void ThriftHiveMetastoreClient::recv_compact2(CompactionResponse& _return) { int32_t rseqid = 0; @@ -52913,12 +53272,12 @@ void ThriftHiveMetastoreClient::recv_get_resource_plan(WMGetResourcePlanResponse iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_resource_plan") != 0) { + if (fname.compare("compact2") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_resource_plan_presult result; + 
ThriftHiveMetastore_compact2_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -52928,28 +53287,22 @@ void ThriftHiveMetastoreClient::recv_get_resource_plan(WMGetResourcePlanResponse // _return pointer has now been filled return; } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_resource_plan failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "compact2 failed: unknown result"); } -void ThriftHiveMetastoreClient::get_active_resource_plan(WMGetActiveResourcePlanResponse& _return, const WMGetActiveResourcePlanRequest& request) +void ThriftHiveMetastoreClient::show_compact(ShowCompactResponse& _return, const ShowCompactRequest& rqst) { - send_get_active_resource_plan(request); - recv_get_active_resource_plan(_return); + send_show_compact(rqst); + recv_show_compact(_return); } -void ThriftHiveMetastoreClient::send_get_active_resource_plan(const WMGetActiveResourcePlanRequest& request) +void ThriftHiveMetastoreClient::send_show_compact(const ShowCompactRequest& rqst) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_active_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_active_resource_plan_pargs args; - args.request = &request; + ThriftHiveMetastore_show_compact_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -52957,7 +53310,7 @@ void ThriftHiveMetastoreClient::send_get_active_resource_plan(const WMGetActiveR oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_active_resource_plan(WMGetActiveResourcePlanResponse& _return) +void ThriftHiveMetastoreClient::recv_show_compact(ShowCompactResponse& _return) { int32_t rseqid = 0; @@ -52977,12 +53330,12 @@ void ThriftHiveMetastoreClient::recv_get_active_resource_plan(WMGetActiveResourc iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_active_resource_plan") != 0) { + if (fname.compare("show_compact") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_active_resource_plan_presult result; + ThriftHiveMetastore_show_compact_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -52992,25 +53345,22 @@ void ThriftHiveMetastoreClient::recv_get_active_resource_plan(WMGetActiveResourc // _return pointer has now been filled return; } - if (result.__isset.o2) { - throw result.o2; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_active_resource_plan failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_compact failed: unknown result"); } -void ThriftHiveMetastoreClient::get_all_resource_plans(WMGetAllResourcePlanResponse& _return, const WMGetAllResourcePlanRequest& request) +void ThriftHiveMetastoreClient::add_dynamic_partitions(const AddDynamicPartitions& rqst) { - send_get_all_resource_plans(request); - recv_get_all_resource_plans(_return); + send_add_dynamic_partitions(rqst); + recv_add_dynamic_partitions(); } -void ThriftHiveMetastoreClient::send_get_all_resource_plans(const 
WMGetAllResourcePlanRequest& request) +void ThriftHiveMetastoreClient::send_add_dynamic_partitions(const AddDynamicPartitions& rqst) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_all_resource_plans", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_resource_plans_pargs args; - args.request = &request; + ThriftHiveMetastore_add_dynamic_partitions_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -53018,7 +53368,7 @@ void ThriftHiveMetastoreClient::send_get_all_resource_plans(const WMGetAllResour oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_all_resource_plans(WMGetAllResourcePlanResponse& _return) +void ThriftHiveMetastoreClient::recv_add_dynamic_partitions() { int32_t rseqid = 0; @@ -53038,40 +53388,38 @@ void ThriftHiveMetastoreClient::recv_get_all_resource_plans(WMGetAllResourcePlan iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_resource_plans") != 0) { + if (fname.compare("add_dynamic_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_all_resource_plans_presult result; - result.success = &_return; + ThriftHiveMetastore_add_dynamic_partitions_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } if (result.__isset.o1) { throw result.o1; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_resource_plans failed: unknown result"); + if (result.__isset.o2) { + throw result.o2; + } + return; } -void ThriftHiveMetastoreClient::alter_resource_plan(WMAlterResourcePlanResponse& _return, const WMAlterResourcePlanRequest& request) +void ThriftHiveMetastoreClient::get_next_notification(NotificationEventResponse& _return, const NotificationEventRequest& rqst) { - send_alter_resource_plan(request); - recv_alter_resource_plan(_return); + send_get_next_notification(rqst); + recv_get_next_notification(_return); } -void ThriftHiveMetastoreClient::send_alter_resource_plan(const WMAlterResourcePlanRequest& request) +void ThriftHiveMetastoreClient::send_get_next_notification(const NotificationEventRequest& rqst) { int32_t cseqid = 0; - oprot_->writeMessageBegin("alter_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_resource_plan_pargs args; - args.request = &request; + ThriftHiveMetastore_get_next_notification_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -53079,7 +53427,7 @@ void ThriftHiveMetastoreClient::send_alter_resource_plan(const WMAlterResourcePl oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_alter_resource_plan(WMAlterResourcePlanResponse& _return) +void ThriftHiveMetastoreClient::recv_get_next_notification(NotificationEventResponse& _return) { int32_t rseqid = 0; @@ -53099,12 +53447,12 @@ void ThriftHiveMetastoreClient::recv_alter_resource_plan(WMAlterResourcePlanResp iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_resource_plan") != 0) { + if (fname.compare("get_next_notification") != 0) { 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_alter_resource_plan_presult result; + ThriftHiveMetastore_get_next_notification_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -53114,31 +53462,21 @@ void ThriftHiveMetastoreClient::recv_alter_resource_plan(WMAlterResourcePlanResp // _return pointer has now been filled return; } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "alter_resource_plan failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_next_notification failed: unknown result"); } -void ThriftHiveMetastoreClient::validate_resource_plan(WMValidateResourcePlanResponse& _return, const WMValidateResourcePlanRequest& request) +void ThriftHiveMetastoreClient::get_current_notificationEventId(CurrentNotificationEventId& _return) { - send_validate_resource_plan(request); - recv_validate_resource_plan(_return); + send_get_current_notificationEventId(); + recv_get_current_notificationEventId(_return); } -void ThriftHiveMetastoreClient::send_validate_resource_plan(const WMValidateResourcePlanRequest& request) +void ThriftHiveMetastoreClient::send_get_current_notificationEventId() { int32_t cseqid = 0; - oprot_->writeMessageBegin("validate_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_validate_resource_plan_pargs args; - args.request = &request; + ThriftHiveMetastore_get_current_notificationEventId_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -53146,7 +53484,7 @@ void ThriftHiveMetastoreClient::send_validate_resource_plan(const WMValidateReso oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_validate_resource_plan(WMValidateResourcePlanResponse& _return) +void ThriftHiveMetastoreClient::recv_get_current_notificationEventId(CurrentNotificationEventId& _return) { int32_t rseqid = 0; @@ -53166,12 +53504,12 @@ void ThriftHiveMetastoreClient::recv_validate_resource_plan(WMValidateResourcePl iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("validate_resource_plan") != 0) { + if (fname.compare("get_current_notificationEventId") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_validate_resource_plan_presult result; + ThriftHiveMetastore_get_current_notificationEventId_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -53181,28 +53519,22 @@ void ThriftHiveMetastoreClient::recv_validate_resource_plan(WMValidateResourcePl // _return pointer has now been filled return; } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "validate_resource_plan failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_current_notificationEventId failed: unknown result"); } -void 
ThriftHiveMetastoreClient::drop_resource_plan(WMDropResourcePlanResponse& _return, const WMDropResourcePlanRequest& request) +void ThriftHiveMetastoreClient::get_notification_events_count(NotificationEventsCountResponse& _return, const NotificationEventsCountRequest& rqst) { - send_drop_resource_plan(request); - recv_drop_resource_plan(_return); + send_get_notification_events_count(rqst); + recv_get_notification_events_count(_return); } -void ThriftHiveMetastoreClient::send_drop_resource_plan(const WMDropResourcePlanRequest& request) +void ThriftHiveMetastoreClient::send_get_notification_events_count(const NotificationEventsCountRequest& rqst) { int32_t cseqid = 0; - oprot_->writeMessageBegin("drop_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_notification_events_count", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_resource_plan_pargs args; - args.request = &request; + ThriftHiveMetastore_get_notification_events_count_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -53210,7 +53542,7 @@ void ThriftHiveMetastoreClient::send_drop_resource_plan(const WMDropResourcePlan oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_drop_resource_plan(WMDropResourcePlanResponse& _return) +void ThriftHiveMetastoreClient::recv_get_notification_events_count(NotificationEventsCountResponse& _return) { int32_t rseqid = 0; @@ -53230,12 +53562,12 @@ void ThriftHiveMetastoreClient::recv_drop_resource_plan(WMDropResourcePlanRespon iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_resource_plan") != 0) { + if (fname.compare("get_notification_events_count") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_drop_resource_plan_presult result; + ThriftHiveMetastore_get_notification_events_count_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -53245,31 +53577,22 @@ void ThriftHiveMetastoreClient::recv_drop_resource_plan(WMDropResourcePlanRespon // _return pointer has now been filled return; } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_resource_plan failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_notification_events_count failed: unknown result"); } -void ThriftHiveMetastoreClient::create_wm_trigger(WMCreateTriggerResponse& _return, const WMCreateTriggerRequest& request) +void ThriftHiveMetastoreClient::fire_listener_event(FireEventResponse& _return, const FireEventRequest& rqst) { - send_create_wm_trigger(request); - recv_create_wm_trigger(_return); + send_fire_listener_event(rqst); + recv_fire_listener_event(_return); } -void ThriftHiveMetastoreClient::send_create_wm_trigger(const WMCreateTriggerRequest& request) +void ThriftHiveMetastoreClient::send_fire_listener_event(const FireEventRequest& rqst) { int32_t cseqid = 0; - oprot_->writeMessageBegin("create_wm_trigger", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_wm_trigger_pargs args; - args.request = &request; + 
ThriftHiveMetastore_fire_listener_event_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -53277,7 +53600,7 @@ void ThriftHiveMetastoreClient::send_create_wm_trigger(const WMCreateTriggerRequ oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_create_wm_trigger(WMCreateTriggerResponse& _return) +void ThriftHiveMetastoreClient::recv_fire_listener_event(FireEventResponse& _return) { int32_t rseqid = 0; @@ -53297,12 +53620,12 @@ void ThriftHiveMetastoreClient::recv_create_wm_trigger(WMCreateTriggerResponse& iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_wm_trigger") != 0) { + if (fname.compare("fire_listener_event") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_create_wm_trigger_presult result; + ThriftHiveMetastore_fire_listener_event_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -53312,33 +53635,73 @@ void ThriftHiveMetastoreClient::recv_create_wm_trigger(WMCreateTriggerResponse& // _return pointer has now been filled return; } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fire_listener_event failed: unknown result"); +} + +void ThriftHiveMetastoreClient::flushCache() +{ + send_flushCache(); + recv_flushCache(); +} + +void ThriftHiveMetastoreClient::send_flushCache() +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_flushCache_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_flushCache() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; } - if (result.__isset.o3) { - throw result.o3; + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); } - if (result.__isset.o4) { - throw result.o4; + if (fname.compare("flushCache") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_wm_trigger failed: unknown result"); + ThriftHiveMetastore_flushCache_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + return; } -void ThriftHiveMetastoreClient::alter_wm_trigger(WMAlterTriggerResponse& _return, const WMAlterTriggerRequest& request) +void ThriftHiveMetastoreClient::cm_recycle(CmRecycleResponse& _return, const CmRecycleRequest& request) { - send_alter_wm_trigger(request); - recv_alter_wm_trigger(_return); + send_cm_recycle(request); + recv_cm_recycle(_return); } -void ThriftHiveMetastoreClient::send_alter_wm_trigger(const WMAlterTriggerRequest& request) +void ThriftHiveMetastoreClient::send_cm_recycle(const CmRecycleRequest& request) { int32_t cseqid = 
0; - oprot_->writeMessageBegin("alter_wm_trigger", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("cm_recycle", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_wm_trigger_pargs args; + ThriftHiveMetastore_cm_recycle_pargs args; args.request = &request; args.write(oprot_); @@ -53347,7 +53710,7 @@ void ThriftHiveMetastoreClient::send_alter_wm_trigger(const WMAlterTriggerReques oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_alter_wm_trigger(WMAlterTriggerResponse& _return) +void ThriftHiveMetastoreClient::recv_cm_recycle(CmRecycleResponse& _return) { int32_t rseqid = 0; @@ -53367,12 +53730,12 @@ void ThriftHiveMetastoreClient::recv_alter_wm_trigger(WMAlterTriggerResponse& _r iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_wm_trigger") != 0) { + if (fname.compare("cm_recycle") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_alter_wm_trigger_presult result; + ThriftHiveMetastore_cm_recycle_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -53385,28 +53748,22 @@ void ThriftHiveMetastoreClient::recv_alter_wm_trigger(WMAlterTriggerResponse& _r if (result.__isset.o1) { throw result.o1; } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "alter_wm_trigger failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cm_recycle failed: unknown result"); } -void ThriftHiveMetastoreClient::drop_wm_trigger(WMDropTriggerResponse& _return, const WMDropTriggerRequest& request) +void ThriftHiveMetastoreClient::get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) { - send_drop_wm_trigger(request); - recv_drop_wm_trigger(_return); + send_get_file_metadata_by_expr(req); + recv_get_file_metadata_by_expr(_return); } -void ThriftHiveMetastoreClient::send_drop_wm_trigger(const WMDropTriggerRequest& request) +void ThriftHiveMetastoreClient::send_get_file_metadata_by_expr(const GetFileMetadataByExprRequest& req) { int32_t cseqid = 0; - oprot_->writeMessageBegin("drop_wm_trigger", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_wm_trigger_pargs args; - args.request = &request; + ThriftHiveMetastore_get_file_metadata_by_expr_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -53414,7 +53771,7 @@ void ThriftHiveMetastoreClient::send_drop_wm_trigger(const WMDropTriggerRequest& oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_drop_wm_trigger(WMDropTriggerResponse& _return) +void ThriftHiveMetastoreClient::recv_get_file_metadata_by_expr(GetFileMetadataByExprResult& _return) { int32_t rseqid = 0; @@ -53434,12 +53791,12 @@ void ThriftHiveMetastoreClient::recv_drop_wm_trigger(WMDropTriggerResponse& _ret iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_wm_trigger") != 0) { + if (fname.compare("get_file_metadata_by_expr") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - 
ThriftHiveMetastore_drop_wm_trigger_presult result; + ThriftHiveMetastore_get_file_metadata_by_expr_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -53449,31 +53806,22 @@ void ThriftHiveMetastoreClient::recv_drop_wm_trigger(WMDropTriggerResponse& _ret // _return pointer has now been filled return; } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_wm_trigger failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata_by_expr failed: unknown result"); } -void ThriftHiveMetastoreClient::get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const WMGetTriggersForResourePlanRequest& request) +void ThriftHiveMetastoreClient::get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) { - send_get_triggers_for_resourceplan(request); - recv_get_triggers_for_resourceplan(_return); + send_get_file_metadata(req); + recv_get_file_metadata(_return); } -void ThriftHiveMetastoreClient::send_get_triggers_for_resourceplan(const WMGetTriggersForResourePlanRequest& request) +void ThriftHiveMetastoreClient::send_get_file_metadata(const GetFileMetadataRequest& req) { int32_t cseqid = 0; - oprot_->writeMessageBegin("get_triggers_for_resourceplan", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_triggers_for_resourceplan_pargs args; - args.request = &request; + ThriftHiveMetastore_get_file_metadata_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -53481,7 +53829,7 @@ void ThriftHiveMetastoreClient::send_get_triggers_for_resourceplan(const WMGetTr oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreClient::recv_get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return) +void ThriftHiveMetastoreClient::recv_get_file_metadata(GetFileMetadataResult& _return) { int32_t rseqid = 0; @@ -53501,12 +53849,12 @@ void ThriftHiveMetastoreClient::recv_get_triggers_for_resourceplan(WMGetTriggers iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_triggers_for_resourceplan") != 0) { + if (fname.compare("get_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - ThriftHiveMetastore_get_triggers_for_resourceplan_presult result; + ThriftHiveMetastore_get_file_metadata_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -53516,1160 +53864,1351 @@ void ThriftHiveMetastoreClient::recv_get_triggers_for_resourceplan(WMGetTriggers // _return pointer has now been filled return; } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_triggers_for_resourceplan failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata failed: unknown result"); } -bool ThriftHiveMetastoreProcessor::dispatchCall(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* 
oprot, const std::string& fname, int32_t seqid, void* callContext) { - ProcessMap::iterator pfn; - pfn = processMap_.find(fname); - if (pfn == processMap_.end()) { - return ::facebook::fb303::FacebookServiceProcessor::dispatchCall(iprot, oprot, fname, seqid, callContext); - } - (this->*(pfn->second))(seqid, iprot, oprot, callContext); - return true; +void ThriftHiveMetastoreClient::put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) +{ + send_put_file_metadata(req); + recv_put_file_metadata(_return); } -void ThriftHiveMetastoreProcessor::process_getMetaConf(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreClient::send_put_file_metadata(const PutFileMetadataRequest& req) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.getMetaConf", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.getMetaConf"); + int32_t cseqid = 0; + oprot_->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.getMetaConf"); - } + ThriftHiveMetastore_put_file_metadata_pargs args; + args.req = &req; + args.write(oprot_); - ThriftHiveMetastore_getMetaConf_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.getMetaConf", bytes); - } +void ThriftHiveMetastoreClient::recv_put_file_metadata(PutFileMetadataResult& _return) +{ - ThriftHiveMetastore_getMetaConf_result result; - try { - iface_->getMetaConf(result.success, args.key); - result.__isset.success = true; - } catch (MetaException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.getMetaConf"); - } + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("getMetaConf", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.getMetaConf"); + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); } + if (fname.compare("put_file_metadata") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_put_file_metadata_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - 
oprot->writeMessageBegin("getMetaConf", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.getMetaConf", bytes); + if (result.__isset.success) { + // _return pointer has now been filled + return; } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "put_file_metadata failed: unknown result"); } -void ThriftHiveMetastoreProcessor::process_setMetaConf(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreClient::clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.setMetaConf", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.setMetaConf"); + send_clear_file_metadata(req); + recv_clear_file_metadata(_return); +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.setMetaConf"); - } +void ThriftHiveMetastoreClient::send_clear_file_metadata(const ClearFileMetadataRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_setMetaConf_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + ThriftHiveMetastore_clear_file_metadata_pargs args; + args.req = &req; + args.write(oprot_); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.setMetaConf", bytes); - } + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} - ThriftHiveMetastore_setMetaConf_result result; - try { - iface_->setMetaConf(args.key, args.value); - } catch (MetaException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.setMetaConf"); - } +void ThriftHiveMetastoreClient::recv_clear_file_metadata(ClearFileMetadataResult& _return) +{ - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("setMetaConf", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.setMetaConf"); + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("clear_file_metadata") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + 
iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_clear_file_metadata_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - oprot->writeMessageBegin("setMetaConf", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.setMetaConf", bytes); + if (result.__isset.success) { + // _return pointer has now been filled + return; } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "clear_file_metadata failed: unknown result"); } -void ThriftHiveMetastoreProcessor::process_create_database(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreClient::cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_database", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_database"); + send_cache_file_metadata(req); + recv_cache_file_metadata(_return); +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_database"); - } +void ThriftHiveMetastoreClient::send_cache_file_metadata(const CacheFileMetadataRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_database_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + ThriftHiveMetastore_cache_file_metadata_pargs args; + args.req = &req; + args.write(oprot_); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_database", bytes); - } + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} - ThriftHiveMetastore_create_database_result result; - try { - iface_->create_database(args.database); - } catch (AlreadyExistsException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (InvalidObjectException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_database"); - } +void ThriftHiveMetastoreClient::recv_cache_file_metadata(CacheFileMetadataResult& _return) +{ - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("create_database", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_database"); + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + 
x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("cache_file_metadata") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_cache_file_metadata_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - oprot->writeMessageBegin("create_database", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_database", bytes); + if (result.__isset.success) { + // _return pointer has now been filled + return; } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cache_file_metadata failed: unknown result"); } -void ThriftHiveMetastoreProcessor::process_get_database(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreClient::get_metastore_db_uuid(std::string& _return) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_database", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_database"); + send_get_metastore_db_uuid(); + recv_get_metastore_db_uuid(_return); +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_database"); - } +void ThriftHiveMetastoreClient::send_get_metastore_db_uuid() +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_metastore_db_uuid", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_database_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + ThriftHiveMetastore_get_metastore_db_uuid_pargs args; + args.write(oprot_); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_database", bytes); - } + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} - ThriftHiveMetastore_get_database_result result; - try { - iface_->get_database(result.success, args.name); - result.__isset.success = true; - } catch (NoSuchObjectException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (MetaException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_database"); - } +void ThriftHiveMetastoreClient::recv_get_metastore_db_uuid(std::string& _return) +{ - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_database", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - if (this->eventHandler_.get() != NULL) { - 
this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_database"); + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; } - - oprot->writeMessageBegin("get_database", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_database", bytes); + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); } -} - -void ThriftHiveMetastoreProcessor::process_drop_database(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_database", callContext); + if (fname.compare("get_metastore_db_uuid") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_database"); + ThriftHiveMetastore_get_metastore_db_uuid_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_database"); + if (result.__isset.success) { + // _return pointer has now been filled + return; } - - ThriftHiveMetastore_drop_database_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_database", bytes); + if (result.__isset.o1) { + throw result.o1; } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_metastore_db_uuid failed: unknown result"); +} - ThriftHiveMetastore_drop_database_result result; - try { - iface_->drop_database(args.name, args.deleteData, args.cascade); - } catch (NoSuchObjectException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (InvalidOperationException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_database"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } +void ThriftHiveMetastoreClient::create_resource_plan(WMCreateResourcePlanResponse& _return, const WMCreateResourcePlanRequest& request) +{ + send_create_resource_plan(request); + recv_create_resource_plan(_return); +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_database"); - } +void 
ThriftHiveMetastoreClient::send_create_resource_plan(const WMCreateResourcePlanRequest& request) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("create_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - oprot->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); + ThriftHiveMetastore_create_resource_plan_pargs args; + args.request = &request; + args.write(oprot_); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_database", bytes); - } + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreProcessor::process_get_databases(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreClient::recv_create_resource_plan(WMCreateResourcePlanResponse& _return) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_databases", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_databases"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_databases"); - } - ThriftHiveMetastore_get_databases_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_databases", bytes); + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("create_resource_plan") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_create_resource_plan_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - ThriftHiveMetastore_get_databases_result result; - try { - iface_->get_databases(result.success, args.pattern); - result.__isset.success = true; - } catch (MetaException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_databases"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_databases", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); + if (result.__isset.success) { + // _return pointer has now been filled return; } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_databases"); + if (result.__isset.o1) { + throw result.o1; } - - oprot->writeMessageBegin("get_databases", 
::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_databases", bytes); + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_resource_plan failed: unknown result"); } -void ThriftHiveMetastoreProcessor::process_get_all_databases(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreClient::get_resource_plan(WMGetResourcePlanResponse& _return, const WMGetResourcePlanRequest& request) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_all_databases", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_all_databases"); + send_get_resource_plan(request); + recv_get_resource_plan(_return); +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_all_databases"); - } +void ThriftHiveMetastoreClient::send_get_resource_plan(const WMGetResourcePlanRequest& request) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_databases_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + ThriftHiveMetastore_get_resource_plan_pargs args; + args.request = &request; + args.write(oprot_); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_all_databases", bytes); - } + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} - ThriftHiveMetastore_get_all_databases_result result; - try { - iface_->get_all_databases(result.success); - result.__isset.success = true; - } catch (MetaException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_all_databases"); - } +void ThriftHiveMetastoreClient::recv_get_resource_plan(WMGetResourcePlanResponse& _return) +{ - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_all_databases", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_all_databases"); + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; } - - oprot->writeMessageBegin("get_all_databases", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != 
NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_all_databases", bytes); + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); } -} - -void ThriftHiveMetastoreProcessor::process_alter_database(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_database", callContext); + if (fname.compare("get_resource_plan") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_database"); + ThriftHiveMetastore_get_resource_plan_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_database"); + if (result.__isset.success) { + // _return pointer has now been filled + return; } - - ThriftHiveMetastore_alter_database_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_database", bytes); + if (result.__isset.o1) { + throw result.o1; } - - ThriftHiveMetastore_alter_database_result result; - try { - iface_->alter_database(args.dbname, args.db); - } catch (MetaException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_database"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("alter_database", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; + if (result.__isset.o2) { + throw result.o2; } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_resource_plan failed: unknown result"); +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_database"); - } +void ThriftHiveMetastoreClient::get_active_resource_plan(WMGetActiveResourcePlanResponse& _return, const WMGetActiveResourcePlanRequest& request) +{ + send_get_active_resource_plan(request); + recv_get_active_resource_plan(_return); +} - oprot->writeMessageBegin("alter_database", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); +void ThriftHiveMetastoreClient::send_get_active_resource_plan(const WMGetActiveResourcePlanRequest& request) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_active_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_database", bytes); - } + ThriftHiveMetastore_get_active_resource_plan_pargs args; + args.request = 
&request; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreProcessor::process_get_type(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreClient::recv_get_active_resource_plan(WMGetActiveResourcePlanResponse& _return) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_type", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_type"); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_type"); - } - - ThriftHiveMetastore_get_type_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_type", bytes); + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_active_resource_plan") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_active_resource_plan_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - ThriftHiveMetastore_get_type_result result; - try { - iface_->get_type(result.success, args.name); - result.__isset.success = true; - } catch (MetaException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_type"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_type", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); + if (result.__isset.success) { + // _return pointer has now been filled return; } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_type"); + if (result.__isset.o2) { + throw result.o2; } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_active_resource_plan failed: unknown result"); +} - oprot->writeMessageBegin("get_type", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_type", bytes); - } +void ThriftHiveMetastoreClient::get_all_resource_plans(WMGetAllResourcePlanResponse& _return, const WMGetAllResourcePlanRequest& request) 
+{ + send_get_all_resource_plans(request); + recv_get_all_resource_plans(_return); } -void ThriftHiveMetastoreProcessor::process_create_type(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreClient::send_get_all_resource_plans(const WMGetAllResourcePlanRequest& request) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_type", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_type"); + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_all_resource_plans", ::apache::thrift::protocol::T_CALL, cseqid); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_type"); - } + ThriftHiveMetastore_get_all_resource_plans_pargs args; + args.request = &request; + args.write(oprot_); - ThriftHiveMetastore_create_type_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_type", bytes); - } +void ThriftHiveMetastoreClient::recv_get_all_resource_plans(WMGetAllResourcePlanResponse& _return) +{ - ThriftHiveMetastore_create_type_result result; - try { - result.success = iface_->create_type(args.type); - result.__isset.success = true; - } catch (AlreadyExistsException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (InvalidObjectException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_type"); - } + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("create_type", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_type"); + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); } + if (fname.compare("get_all_resource_plans") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_all_resource_plans_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - oprot->writeMessageBegin("create_type", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - 
this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_type", bytes); + if (result.__isset.success) { + // _return pointer has now been filled + return; } + if (result.__isset.o1) { + throw result.o1; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_resource_plans failed: unknown result"); } -void ThriftHiveMetastoreProcessor::process_drop_type(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreClient::alter_resource_plan(WMAlterResourcePlanResponse& _return, const WMAlterResourcePlanRequest& request) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_type", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_type"); + send_alter_resource_plan(request); + recv_alter_resource_plan(_return); +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_type"); - } +void ThriftHiveMetastoreClient::send_alter_resource_plan(const WMAlterResourcePlanRequest& request) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("alter_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_type_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + ThriftHiveMetastore_alter_resource_plan_pargs args; + args.request = &request; + args.write(oprot_); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_type", bytes); - } + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} - ThriftHiveMetastore_drop_type_result result; - try { - result.success = iface_->drop_type(args.type); - result.__isset.success = true; - } catch (MetaException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_type"); - } +void ThriftHiveMetastoreClient::recv_alter_resource_plan(WMAlterResourcePlanResponse& _return) +{ - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("drop_type", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_type"); + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; } - - oprot->writeMessageBegin("drop_type", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_type", bytes); + if (mtype != ::apache::thrift::protocol::T_REPLY) { + 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); } -} - -void ThriftHiveMetastoreProcessor::process_get_type_all(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_type_all", callContext); + if (fname.compare("alter_resource_plan") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_type_all"); + ThriftHiveMetastore_alter_resource_plan_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_type_all"); + if (result.__isset.success) { + // _return pointer has now been filled + return; } - - ThriftHiveMetastore_get_type_all_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_type_all", bytes); + if (result.__isset.o1) { + throw result.o1; } - - ThriftHiveMetastore_get_type_all_result result; - try { - iface_->get_type_all(result.success, args.name); - result.__isset.success = true; - } catch (MetaException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_type_all"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_type_all", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; + if (result.__isset.o2) { + throw result.o2; } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_type_all"); + if (result.__isset.o3) { + throw result.o3; } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "alter_resource_plan failed: unknown result"); +} - oprot->writeMessageBegin("get_type_all", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_type_all", bytes); - } +void ThriftHiveMetastoreClient::validate_resource_plan(WMValidateResourcePlanResponse& _return, const WMValidateResourcePlanRequest& request) +{ + send_validate_resource_plan(request); + recv_validate_resource_plan(_return); } -void ThriftHiveMetastoreProcessor::process_get_fields(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreClient::send_validate_resource_plan(const WMValidateResourcePlanRequest& request) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_fields", callContext); - } - ::apache::thrift::TProcessorContextFreer 
freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_fields"); + int32_t cseqid = 0; + oprot_->writeMessageBegin("validate_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_fields"); - } + ThriftHiveMetastore_validate_resource_plan_pargs args; + args.request = &request; + args.write(oprot_); - ThriftHiveMetastore_get_fields_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_fields", bytes); - } +void ThriftHiveMetastoreClient::recv_validate_resource_plan(WMValidateResourcePlanResponse& _return) +{ - ThriftHiveMetastore_get_fields_result result; - try { - iface_->get_fields(result.success, args.db_name, args.table_name); - result.__isset.success = true; - } catch (MetaException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (UnknownTableException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (UnknownDBException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_fields"); - } + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_fields", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_fields"); + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); } - - oprot->writeMessageBegin("get_fields", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_fields", bytes); + if (fname.compare("validate_resource_plan") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); } -} + ThriftHiveMetastore_validate_resource_plan_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); -void ThriftHiveMetastoreProcessor::process_get_fields_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_fields_with_environment_context", callContext); + if (result.__isset.success) { + // _return pointer has now been filled + return; } - 
::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_fields_with_environment_context"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_fields_with_environment_context"); + if (result.__isset.o1) { + throw result.o1; } - - ThriftHiveMetastore_get_fields_with_environment_context_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_fields_with_environment_context", bytes); + if (result.__isset.o2) { + throw result.o2; } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "validate_resource_plan failed: unknown result"); +} - ThriftHiveMetastore_get_fields_with_environment_context_result result; - try { - iface_->get_fields_with_environment_context(result.success, args.db_name, args.table_name, args.environment_context); - result.__isset.success = true; - } catch (MetaException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (UnknownTableException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (UnknownDBException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_fields_with_environment_context"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_fields_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } +void ThriftHiveMetastoreClient::drop_resource_plan(WMDropResourcePlanResponse& _return, const WMDropResourcePlanRequest& request) +{ + send_drop_resource_plan(request); + recv_drop_resource_plan(_return); +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_fields_with_environment_context"); - } +void ThriftHiveMetastoreClient::send_drop_resource_plan(const WMDropResourcePlanRequest& request) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("drop_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - oprot->writeMessageBegin("get_fields_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); + ThriftHiveMetastore_drop_resource_plan_pargs args; + args.request = &request; + args.write(oprot_); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_fields_with_environment_context", bytes); - } + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); } -void ThriftHiveMetastoreProcessor::process_get_schema(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreClient::recv_drop_resource_plan(WMDropResourcePlanResponse& _return) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_schema", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_schema"); - - if 
(this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_schema"); - } - ThriftHiveMetastore_get_schema_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_schema", bytes); + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_resource_plan") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_drop_resource_plan_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - ThriftHiveMetastore_get_schema_result result; - try { - iface_->get_schema(result.success, args.db_name, args.table_name); - result.__isset.success = true; - } catch (MetaException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (UnknownTableException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (UnknownDBException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_schema"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_schema", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); + if (result.__isset.success) { + // _return pointer has now been filled return; } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_schema"); + if (result.__isset.o1) { + throw result.o1; } - - oprot->writeMessageBegin("get_schema", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_schema", bytes); + if (result.__isset.o2) { + throw result.o2; } + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_resource_plan failed: unknown result"); } -void ThriftHiveMetastoreProcessor::process_get_schema_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreClient::create_wm_trigger(WMCreateTriggerResponse& _return, const WMCreateTriggerRequest& request) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_schema_with_environment_context", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, 
"ThriftHiveMetastore.get_schema_with_environment_context"); + send_create_wm_trigger(request); + recv_create_wm_trigger(_return); +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_schema_with_environment_context"); - } +void ThriftHiveMetastoreClient::send_create_wm_trigger(const WMCreateTriggerRequest& request) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("create_wm_trigger", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_schema_with_environment_context_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + ThriftHiveMetastore_create_wm_trigger_pargs args; + args.request = &request; + args.write(oprot_); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_schema_with_environment_context", bytes); - } + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} - ThriftHiveMetastore_get_schema_with_environment_context_result result; - try { - iface_->get_schema_with_environment_context(result.success, args.db_name, args.table_name, args.environment_context); - result.__isset.success = true; - } catch (MetaException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (UnknownTableException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (UnknownDBException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_schema_with_environment_context"); - } +void ThriftHiveMetastoreClient::recv_create_wm_trigger(WMCreateTriggerResponse& _return) +{ - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_schema_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_schema_with_environment_context"); + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; } - - oprot->writeMessageBegin("get_schema_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_schema_with_environment_context", bytes); + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); } -} - -void ThriftHiveMetastoreProcessor::process_create_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_table", callContext); + if (fname.compare("create_wm_trigger") != 0) { + 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_table"); + ThriftHiveMetastore_create_wm_trigger_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_table"); + if (result.__isset.success) { + // _return pointer has now been filled + return; } - - ThriftHiveMetastore_create_table_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_table", bytes); + if (result.__isset.o1) { + throw result.o1; } - - ThriftHiveMetastore_create_table_result result; - try { - iface_->create_table(args.tbl); - } catch (AlreadyExistsException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (InvalidObjectException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (NoSuchObjectException &o4) { - result.o4 = o4; - result.__isset.o4 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_table"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("create_table", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; + if (result.__isset.o2) { + throw result.o2; } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_table"); + if (result.__isset.o3) { + throw result.o3; } - - oprot->writeMessageBegin("create_table", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_table", bytes); + if (result.__isset.o4) { + throw result.o4; } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_wm_trigger failed: unknown result"); } -void ThriftHiveMetastoreProcessor::process_create_table_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreClient::alter_wm_trigger(WMAlterTriggerResponse& _return, const WMAlterTriggerRequest& request) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_table_with_environment_context", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_table_with_environment_context"); + send_alter_wm_trigger(request); + recv_alter_wm_trigger(_return); +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_table_with_environment_context"); - } +void ThriftHiveMetastoreClient::send_alter_wm_trigger(const WMAlterTriggerRequest& request) +{ + int32_t cseqid = 
0; + oprot_->writeMessageBegin("alter_wm_trigger", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_table_with_environment_context_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + ThriftHiveMetastore_alter_wm_trigger_pargs args; + args.request = &request; + args.write(oprot_); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_table_with_environment_context", bytes); - } + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} - ThriftHiveMetastore_create_table_with_environment_context_result result; - try { - iface_->create_table_with_environment_context(args.tbl, args.environment_context); - } catch (AlreadyExistsException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (InvalidObjectException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (NoSuchObjectException &o4) { - result.o4 = o4; - result.__isset.o4 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_table_with_environment_context"); - } +void ThriftHiveMetastoreClient::recv_alter_wm_trigger(WMAlterTriggerResponse& _return) +{ - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("create_table_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_table_with_environment_context"); + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("alter_wm_trigger") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_alter_wm_trigger_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - oprot->writeMessageBegin("create_table_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_table_with_environment_context", bytes); + if (result.__isset.success) { + // _return pointer has now been filled + return; } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "alter_wm_trigger failed: unknown result"); } -void 
ThriftHiveMetastoreProcessor::process_create_table_with_constraints(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreClient::drop_wm_trigger(WMDropTriggerResponse& _return, const WMDropTriggerRequest& request) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_table_with_constraints", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_table_with_constraints"); + send_drop_wm_trigger(request); + recv_drop_wm_trigger(_return); +} + +void ThriftHiveMetastoreClient::send_drop_wm_trigger(const WMDropTriggerRequest& request) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("drop_wm_trigger", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_drop_wm_trigger_pargs args; + args.request = &request; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_drop_wm_trigger(WMDropTriggerResponse& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_wm_trigger") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_drop_wm_trigger_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_wm_trigger failed: unknown result"); +} + +void ThriftHiveMetastoreClient::get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const WMGetTriggersForResourePlanRequest& request) +{ + send_get_triggers_for_resourceplan(request); + recv_get_triggers_for_resourceplan(_return); +} + +void ThriftHiveMetastoreClient::send_get_triggers_for_resourceplan(const WMGetTriggersForResourePlanRequest& request) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_triggers_for_resourceplan", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_triggers_for_resourceplan_pargs args; + args.request = &request; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + 
::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_triggers_for_resourceplan") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_triggers_for_resourceplan_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_triggers_for_resourceplan failed: unknown result"); +} + +void ThriftHiveMetastoreClient::create_or_update_wm_pool(WMCreateOrUpdatePoolResponse& _return, const WMCreateOrUpdatePoolRequest& request) +{ + send_create_or_update_wm_pool(request); + recv_create_or_update_wm_pool(_return); +} + +void ThriftHiveMetastoreClient::send_create_or_update_wm_pool(const WMCreateOrUpdatePoolRequest& request) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("create_or_update_wm_pool", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_create_or_update_wm_pool_pargs args; + args.request = &request; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_create_or_update_wm_pool(WMCreateOrUpdatePoolResponse& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("create_or_update_wm_pool") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_create_or_update_wm_pool_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_or_update_wm_pool failed: unknown result"); +} + +void ThriftHiveMetastoreClient::drop_wm_pool(WMDropPoolResponse& _return, const WMDropPoolRequest& request) +{ + send_drop_wm_pool(request); + recv_drop_wm_pool(_return); +} + +void ThriftHiveMetastoreClient::send_drop_wm_pool(const WMDropPoolRequest& request) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("drop_wm_pool", ::apache::thrift::protocol::T_CALL, cseqid); + + 
ThriftHiveMetastore_drop_wm_pool_pargs args; + args.request = &request; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_drop_wm_pool(WMDropPoolResponse& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_wm_pool") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_drop_wm_pool_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_wm_pool failed: unknown result"); +} + +void ThriftHiveMetastoreClient::create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const WMCreateOrUpdateMappingRequest& request) +{ + send_create_or_update_wm_mapping(request); + recv_create_or_update_wm_mapping(_return); +} + +void ThriftHiveMetastoreClient::send_create_or_update_wm_mapping(const WMCreateOrUpdateMappingRequest& request) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("create_or_update_wm_mapping", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_create_or_update_wm_mapping_pargs args; + args.request = &request; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("create_or_update_wm_mapping") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_create_or_update_wm_mapping_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_or_update_wm_mapping failed: unknown result"); +} + +void ThriftHiveMetastoreClient::drop_wm_mapping(WMDropMappingResponse& _return, const WMDropMappingRequest& request) +{ + send_drop_wm_mapping(request); + recv_drop_wm_mapping(_return); +} + +void ThriftHiveMetastoreClient::send_drop_wm_mapping(const WMDropMappingRequest& request) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("drop_wm_mapping", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_drop_wm_mapping_pargs args; + args.request = &request; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_drop_wm_mapping(WMDropMappingResponse& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_wm_mapping") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_drop_wm_mapping_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_wm_mapping failed: unknown result"); +} + +void ThriftHiveMetastoreClient::create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const WMCreateOrDropTriggerToPoolMappingRequest& request) +{ + send_create_or_drop_wm_trigger_to_pool_mapping(request); + recv_create_or_drop_wm_trigger_to_pool_mapping(_return); +} + +void ThriftHiveMetastoreClient::send_create_or_drop_wm_trigger_to_pool_mapping(const WMCreateOrDropTriggerToPoolMappingRequest& request) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("create_or_drop_wm_trigger_to_pool_mapping", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_pargs args; + args.request = &request; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + 
iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("create_or_drop_wm_trigger_to_pool_mapping") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_or_drop_wm_trigger_to_pool_mapping failed: unknown result"); +} + +bool ThriftHiveMetastoreProcessor::dispatchCall(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, const std::string& fname, int32_t seqid, void* callContext) { + ProcessMap::iterator pfn; + pfn = processMap_.find(fname); + if (pfn == processMap_.end()) { + return ::facebook::fb303::FacebookServiceProcessor::dispatchCall(iprot, oprot, fname, seqid, callContext); + } + (this->*(pfn->second))(seqid, iprot, oprot, callContext); + return true; +} +void ThriftHiveMetastoreProcessor::process_getMetaConf(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_table_with_constraints"); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.getMetaConf", callContext); } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.getMetaConf"); - ThriftHiveMetastore_create_table_with_constraints_args args; + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.getMetaConf"); + } + + ThriftHiveMetastore_getMetaConf_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_table_with_constraints", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.getMetaConf", bytes); } - ThriftHiveMetastore_create_table_with_constraints_result result; + ThriftHiveMetastore_getMetaConf_result result; try { - iface_->create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints); - } catch (AlreadyExistsException &o1) { + iface_->getMetaConf(result.success, args.key); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (InvalidObjectException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (NoSuchObjectException &o4) { - result.o4 = o4; - result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_table_with_constraints"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.getMetaConf"); } ::apache::thrift::TApplicationException x(e.what()); - 
oprot->writeMessageBegin("create_table_with_constraints", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("getMetaConf", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -54678,57 +55217,54 @@ void ThriftHiveMetastoreProcessor::process_create_table_with_constraints(int32_t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_table_with_constraints"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.getMetaConf"); } - oprot->writeMessageBegin("create_table_with_constraints", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("getMetaConf", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_table_with_constraints", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.getMetaConf", bytes); } } -void ThriftHiveMetastoreProcessor::process_drop_constraint(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_setMetaConf(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_constraint", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.setMetaConf", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_constraint"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.setMetaConf"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_constraint"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.setMetaConf"); } - ThriftHiveMetastore_drop_constraint_args args; + ThriftHiveMetastore_setMetaConf_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_constraint", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.setMetaConf", bytes); } - ThriftHiveMetastore_drop_constraint_result result; + ThriftHiveMetastore_setMetaConf_result result; try { - iface_->drop_constraint(args.req); - } catch (NoSuchObjectException &o1) { + iface_->setMetaConf(args.key, args.value); + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_constraint"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.setMetaConf"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("drop_constraint", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("setMetaConf", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -54737,57 +55273,60 @@ void 
ThriftHiveMetastoreProcessor::process_drop_constraint(int32_t seqid, ::apac } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_constraint"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.setMetaConf"); } - oprot->writeMessageBegin("drop_constraint", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("setMetaConf", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_constraint", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.setMetaConf", bytes); } } -void ThriftHiveMetastoreProcessor::process_add_primary_key(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_create_database(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_primary_key", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_database", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_primary_key"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_database"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_primary_key"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_database"); } - ThriftHiveMetastore_add_primary_key_args args; + ThriftHiveMetastore_create_database_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_primary_key", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_database", bytes); } - ThriftHiveMetastore_add_primary_key_result result; + ThriftHiveMetastore_create_database_result result; try { - iface_->add_primary_key(args.req); - } catch (NoSuchObjectException &o1) { + iface_->create_database(args.database); + } catch (AlreadyExistsException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (InvalidObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_primary_key"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_database"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("add_primary_key", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("create_database", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -54796,44 +55335,45 @@ void ThriftHiveMetastoreProcessor::process_add_primary_key(int32_t seqid, ::apac } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_primary_key"); + 
this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_database"); } - oprot->writeMessageBegin("add_primary_key", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("create_database", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_primary_key", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_database", bytes); } } -void ThriftHiveMetastoreProcessor::process_add_foreign_key(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_database(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_foreign_key", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_database", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_foreign_key"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_database"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_foreign_key"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_database"); } - ThriftHiveMetastore_add_foreign_key_args args; + ThriftHiveMetastore_get_database_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_foreign_key", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_database", bytes); } - ThriftHiveMetastore_add_foreign_key_result result; + ThriftHiveMetastore_get_database_result result; try { - iface_->add_foreign_key(args.req); + iface_->get_database(result.success, args.name); + result.__isset.success = true; } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; @@ -54842,11 +55382,11 @@ void ThriftHiveMetastoreProcessor::process_add_foreign_key(int32_t seqid, ::apac result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_foreign_key"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_database"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("add_foreign_key", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_database", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -54855,57 +55395,60 @@ void ThriftHiveMetastoreProcessor::process_add_foreign_key(int32_t seqid, ::apac } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_foreign_key"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_database"); } - oprot->writeMessageBegin("add_foreign_key", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_database", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); 
oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_foreign_key", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_database", bytes); } } -void ThriftHiveMetastoreProcessor::process_add_unique_constraint(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_drop_database(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_unique_constraint", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_database", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_unique_constraint"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_database"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_unique_constraint"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_database"); } - ThriftHiveMetastore_add_unique_constraint_args args; + ThriftHiveMetastore_drop_database_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_unique_constraint", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_database", bytes); } - ThriftHiveMetastore_add_unique_constraint_result result; + ThriftHiveMetastore_drop_database_result result; try { - iface_->add_unique_constraint(args.req); + iface_->drop_database(args.name, args.deleteData, args.cascade); } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (InvalidOperationException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_unique_constraint"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_database"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("add_unique_constraint", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -54914,57 +55457,55 @@ void ThriftHiveMetastoreProcessor::process_add_unique_constraint(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_unique_constraint"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_database"); } - oprot->writeMessageBegin("add_unique_constraint", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - 
this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_unique_constraint", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_database", bytes); } } -void ThriftHiveMetastoreProcessor::process_add_not_null_constraint(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_databases(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_not_null_constraint", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_databases", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_not_null_constraint"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_databases"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_not_null_constraint"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_databases"); } - ThriftHiveMetastore_add_not_null_constraint_args args; + ThriftHiveMetastore_get_databases_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_not_null_constraint", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_databases", bytes); } - ThriftHiveMetastore_add_not_null_constraint_result result; + ThriftHiveMetastore_get_databases_result result; try { - iface_->add_not_null_constraint(args.req); - } catch (NoSuchObjectException &o1) { + iface_->get_databases(result.success, args.pattern); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { - result.o2 = o2; - result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_not_null_constraint"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_databases"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("add_not_null_constraint", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_databases", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -54973,57 +55514,55 @@ void ThriftHiveMetastoreProcessor::process_add_not_null_constraint(int32_t seqid } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_not_null_constraint"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_databases"); } - oprot->writeMessageBegin("add_not_null_constraint", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_databases", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_not_null_constraint", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_databases", bytes); } } -void 
ThriftHiveMetastoreProcessor::process_drop_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_all_databases(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_table", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_all_databases", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_table"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_all_databases"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_table"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_all_databases"); } - ThriftHiveMetastore_drop_table_args args; + ThriftHiveMetastore_get_all_databases_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_table", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_all_databases", bytes); } - ThriftHiveMetastore_drop_table_result result; + ThriftHiveMetastore_get_all_databases_result result; try { - iface_->drop_table(args.dbname, args.name, args.deleteData); - } catch (NoSuchObjectException &o1) { + iface_->get_all_databases(result.success); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_table"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_all_databases"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("drop_table", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_all_databases", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55032,57 +55571,57 @@ void ThriftHiveMetastoreProcessor::process_drop_table(int32_t seqid, ::apache::t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_table"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_all_databases"); } - oprot->writeMessageBegin("drop_table", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_all_databases", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_table", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_all_databases", bytes); } } -void ThriftHiveMetastoreProcessor::process_drop_table_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_alter_database(int32_t seqid, 
::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_table_with_environment_context", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_database", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_table_with_environment_context"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_database"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_table_with_environment_context"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_database"); } - ThriftHiveMetastore_drop_table_with_environment_context_args args; + ThriftHiveMetastore_alter_database_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_table_with_environment_context", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_database", bytes); } - ThriftHiveMetastore_drop_table_with_environment_context_result result; + ThriftHiveMetastore_alter_database_result result; try { - iface_->drop_table_with_environment_context(args.dbname, args.name, args.deleteData, args.environment_context); - } catch (NoSuchObjectException &o1) { + iface_->alter_database(args.dbname, args.db); + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_table_with_environment_context"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_database"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("drop_table_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("alter_database", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55091,54 +55630,58 @@ void ThriftHiveMetastoreProcessor::process_drop_table_with_environment_context(i } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_table_with_environment_context"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_database"); } - oprot->writeMessageBegin("drop_table_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("alter_database", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_table_with_environment_context", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_database", bytes); } } -void ThriftHiveMetastoreProcessor::process_truncate_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void 
ThriftHiveMetastoreProcessor::process_get_type(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.truncate_table", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_type", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.truncate_table"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_type"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.truncate_table"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_type"); } - ThriftHiveMetastore_truncate_table_args args; + ThriftHiveMetastore_get_type_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.truncate_table", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_type", bytes); } - ThriftHiveMetastore_truncate_table_result result; + ThriftHiveMetastore_get_type_result result; try { - iface_->truncate_table(args.dbName, args.tableName, args.partNames); + iface_->get_type(result.success, args.name); + result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.truncate_table"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_type"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("truncate_table", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_type", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55147,55 +55690,61 @@ void ThriftHiveMetastoreProcessor::process_truncate_table(int32_t seqid, ::apach } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.truncate_table"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_type"); } - oprot->writeMessageBegin("truncate_table", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_type", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.truncate_table", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_type", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_tables(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_create_type(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_tables", callContext); + ctx = 
this->eventHandler_->getContext("ThriftHiveMetastore.create_type", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_tables"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_type"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_tables"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_type"); } - ThriftHiveMetastore_get_tables_args args; + ThriftHiveMetastore_create_type_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_tables", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_type", bytes); } - ThriftHiveMetastore_get_tables_result result; + ThriftHiveMetastore_create_type_result result; try { - iface_->get_tables(result.success, args.db_name, args.pattern); + result.success = iface_->create_type(args.type); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (AlreadyExistsException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (InvalidObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_tables"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_type"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_tables", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("create_type", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55204,55 +55753,58 @@ void ThriftHiveMetastoreProcessor::process_get_tables(int32_t seqid, ::apache::t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_tables"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_type"); } - oprot->writeMessageBegin("get_tables", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("create_type", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_tables", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_type", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_tables_by_type(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_drop_type(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_tables_by_type", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_type", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_tables_by_type"); + ::apache::thrift::TProcessorContextFreer 
freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_type"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_tables_by_type"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_type"); } - ThriftHiveMetastore_get_tables_by_type_args args; + ThriftHiveMetastore_drop_type_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_tables_by_type", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_type", bytes); } - ThriftHiveMetastore_get_tables_by_type_result result; + ThriftHiveMetastore_drop_type_result result; try { - iface_->get_tables_by_type(result.success, args.db_name, args.pattern, args.tableType); + result.success = iface_->drop_type(args.type); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_tables_by_type"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_type"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_tables_by_type", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("drop_type", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55261,55 +55813,55 @@ void ThriftHiveMetastoreProcessor::process_get_tables_by_type(int32_t seqid, ::a } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_tables_by_type"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_type"); } - oprot->writeMessageBegin("get_tables_by_type", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("drop_type", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_tables_by_type", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_type", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_table_meta(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_type_all(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table_meta", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_type_all", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table_meta"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_type_all"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table_meta"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_type_all"); } - ThriftHiveMetastore_get_table_meta_args 
args; + ThriftHiveMetastore_get_type_all_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table_meta", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_type_all", bytes); } - ThriftHiveMetastore_get_table_meta_result result; + ThriftHiveMetastore_get_type_all_result result; try { - iface_->get_table_meta(result.success, args.db_patterns, args.tbl_patterns, args.tbl_types); + iface_->get_type_all(result.success, args.name); result.__isset.success = true; - } catch (MetaException &o1) { - result.o1 = o1; - result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table_meta"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_type_all"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_table_meta", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_type_all", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55318,55 +55870,61 @@ void ThriftHiveMetastoreProcessor::process_get_table_meta(int32_t seqid, ::apach } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table_meta"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_type_all"); } - oprot->writeMessageBegin("get_table_meta", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_type_all", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_table_meta", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_type_all", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_all_tables(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_fields(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_all_tables", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_fields", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_all_tables"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_fields"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_all_tables"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_fields"); } - ThriftHiveMetastore_get_all_tables_args args; + ThriftHiveMetastore_get_fields_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_all_tables", bytes); + this->eventHandler_->postRead(ctx, 
"ThriftHiveMetastore.get_fields", bytes); } - ThriftHiveMetastore_get_all_tables_result result; + ThriftHiveMetastore_get_fields_result result; try { - iface_->get_all_tables(result.success, args.db_name); + iface_->get_fields(result.success, args.db_name, args.table_name); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (UnknownTableException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (UnknownDBException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_all_tables"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_fields"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_all_tables", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_fields", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55375,58 +55933,61 @@ void ThriftHiveMetastoreProcessor::process_get_all_tables(int32_t seqid, ::apach } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_all_tables"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_fields"); } - oprot->writeMessageBegin("get_all_tables", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_fields", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_all_tables", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_fields", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_fields_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_fields_with_environment_context", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_fields_with_environment_context"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_fields_with_environment_context"); } - ThriftHiveMetastore_get_table_args args; + ThriftHiveMetastore_get_fields_with_environment_context_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_fields_with_environment_context", bytes); } - ThriftHiveMetastore_get_table_result result; + 
ThriftHiveMetastore_get_fields_with_environment_context_result result; try { - iface_->get_table(result.success, args.dbname, args.tbl_name); + iface_->get_fields_with_environment_context(result.success, args.db_name, args.table_name, args.environment_context); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (UnknownTableException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (UnknownDBException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_fields_with_environment_context"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_table", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_fields_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55435,52 +55996,61 @@ void ThriftHiveMetastoreProcessor::process_get_table(int32_t seqid, ::apache::th } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_fields_with_environment_context"); } - oprot->writeMessageBegin("get_table", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_fields_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_table", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_fields_with_environment_context", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_table_objects_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_schema(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table_objects_by_name", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_schema", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table_objects_by_name"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_schema"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table_objects_by_name"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_schema"); } - ThriftHiveMetastore_get_table_objects_by_name_args args; + ThriftHiveMetastore_get_schema_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table_objects_by_name", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_schema", bytes); } - 
ThriftHiveMetastore_get_table_objects_by_name_result result; + ThriftHiveMetastore_get_schema_result result; try { - iface_->get_table_objects_by_name(result.success, args.dbname, args.tbl_names); + iface_->get_schema(result.success, args.db_name, args.table_name); result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (UnknownTableException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (UnknownDBException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table_objects_by_name"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_schema"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_table_objects_by_name", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_schema", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55489,58 +56059,61 @@ void ThriftHiveMetastoreProcessor::process_get_table_objects_by_name(int32_t seq } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table_objects_by_name"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_schema"); } - oprot->writeMessageBegin("get_table_objects_by_name", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_schema", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_table_objects_by_name", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_schema", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_table_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_schema_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table_req", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_schema_with_environment_context", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table_req"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_schema_with_environment_context"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table_req"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_schema_with_environment_context"); } - ThriftHiveMetastore_get_table_req_args args; + ThriftHiveMetastore_get_schema_with_environment_context_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table_req", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_schema_with_environment_context", bytes); } - 
ThriftHiveMetastore_get_table_req_result result; + ThriftHiveMetastore_get_schema_with_environment_context_result result; try { - iface_->get_table_req(result.success, args.req); + iface_->get_schema_with_environment_context(result.success, args.db_name, args.table_name, args.environment_context); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (UnknownTableException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (UnknownDBException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table_req"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_schema_with_environment_context"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_table_req", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_schema_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55549,61 +56122,63 @@ void ThriftHiveMetastoreProcessor::process_get_table_req(int32_t seqid, ::apache } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table_req"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_schema_with_environment_context"); } - oprot->writeMessageBegin("get_table_req", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_schema_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_table_req", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_schema_with_environment_context", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_table_objects_by_name_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_create_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table_objects_by_name_req", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_table", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table_objects_by_name_req"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_table"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table_objects_by_name_req"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_table"); } - ThriftHiveMetastore_get_table_objects_by_name_req_args args; + ThriftHiveMetastore_create_table_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table_objects_by_name_req", bytes); + 
this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_table", bytes); } - ThriftHiveMetastore_get_table_objects_by_name_req_result result; + ThriftHiveMetastore_create_table_result result; try { - iface_->get_table_objects_by_name_req(result.success, args.req); - result.__isset.success = true; - } catch (MetaException &o1) { + iface_->create_table(args.tbl); + } catch (AlreadyExistsException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (InvalidOperationException &o2) { + } catch (InvalidObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (UnknownDBException &o3) { + } catch (MetaException &o3) { result.o3 = o3; result.__isset.o3 = true; + } catch (NoSuchObjectException &o4) { + result.o4 = o4; + result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table_objects_by_name_req"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_table"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_table_objects_by_name_req", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("create_table", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55612,61 +56187,63 @@ void ThriftHiveMetastoreProcessor::process_get_table_objects_by_name_req(int32_t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table_objects_by_name_req"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_table"); } - oprot->writeMessageBegin("get_table_objects_by_name_req", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("create_table", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_table_objects_by_name_req", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_table", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_table_names_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_create_table_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table_names_by_filter", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_table_with_environment_context", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table_names_by_filter"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_table_with_environment_context"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table_names_by_filter"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_table_with_environment_context"); } - ThriftHiveMetastore_get_table_names_by_filter_args args; + ThriftHiveMetastore_create_table_with_environment_context_args args; args.read(iprot); 
iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table_names_by_filter", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_table_with_environment_context", bytes); } - ThriftHiveMetastore_get_table_names_by_filter_result result; + ThriftHiveMetastore_create_table_with_environment_context_result result; try { - iface_->get_table_names_by_filter(result.success, args.dbname, args.filter, args.max_tables); - result.__isset.success = true; - } catch (MetaException &o1) { + iface_->create_table_with_environment_context(args.tbl, args.environment_context); + } catch (AlreadyExistsException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (InvalidOperationException &o2) { + } catch (InvalidObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (UnknownDBException &o3) { + } catch (MetaException &o3) { result.o3 = o3; result.__isset.o3 = true; + } catch (NoSuchObjectException &o4) { + result.o4 = o4; + result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table_names_by_filter"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_table_with_environment_context"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_table_names_by_filter", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("create_table_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55675,57 +56252,63 @@ void ThriftHiveMetastoreProcessor::process_get_table_names_by_filter(int32_t seq } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table_names_by_filter"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_table_with_environment_context"); } - oprot->writeMessageBegin("get_table_names_by_filter", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("create_table_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_table_names_by_filter", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_table_with_environment_context", bytes); } } -void ThriftHiveMetastoreProcessor::process_alter_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_create_table_with_constraints(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_table", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_table_with_constraints", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_table"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_table_with_constraints"); 
if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_table"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_table_with_constraints"); } - ThriftHiveMetastore_alter_table_args args; + ThriftHiveMetastore_create_table_with_constraints_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_table", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_table_with_constraints", bytes); } - ThriftHiveMetastore_alter_table_result result; + ThriftHiveMetastore_create_table_with_constraints_result result; try { - iface_->alter_table(args.dbname, args.tbl_name, args.new_tbl); - } catch (InvalidOperationException &o1) { + iface_->create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints); + } catch (AlreadyExistsException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (InvalidObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (NoSuchObjectException &o4) { + result.o4 = o4; + result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_table"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_table_with_constraints"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("alter_table", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("create_table_with_constraints", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55734,57 +56317,57 @@ void ThriftHiveMetastoreProcessor::process_alter_table(int32_t seqid, ::apache:: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_table"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_table_with_constraints"); } - oprot->writeMessageBegin("alter_table", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("create_table_with_constraints", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_table", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_table_with_constraints", bytes); } } -void ThriftHiveMetastoreProcessor::process_alter_table_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_drop_constraint(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_table_with_environment_context", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_constraint", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, 
"ThriftHiveMetastore.alter_table_with_environment_context"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_constraint"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_table_with_environment_context"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_constraint"); } - ThriftHiveMetastore_alter_table_with_environment_context_args args; + ThriftHiveMetastore_drop_constraint_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_table_with_environment_context", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_constraint", bytes); } - ThriftHiveMetastore_alter_table_with_environment_context_result result; + ThriftHiveMetastore_drop_constraint_result result; try { - iface_->alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context); - } catch (InvalidOperationException &o1) { + iface_->drop_constraint(args.req); + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { - result.o2 = o2; - result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_table_with_environment_context"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_constraint"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("alter_table_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("drop_constraint", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55793,45 +56376,45 @@ void ThriftHiveMetastoreProcessor::process_alter_table_with_environment_context( } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_table_with_environment_context"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_constraint"); } - oprot->writeMessageBegin("alter_table_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("drop_constraint", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_table_with_environment_context", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_constraint", bytes); } } -void ThriftHiveMetastoreProcessor::process_alter_table_with_cascade(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_add_primary_key(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_table_with_cascade", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_primary_key", callContext); } - 
::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_table_with_cascade"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_primary_key"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_table_with_cascade"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_primary_key"); } - ThriftHiveMetastore_alter_table_with_cascade_args args; + ThriftHiveMetastore_add_primary_key_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_table_with_cascade", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_primary_key", bytes); } - ThriftHiveMetastore_alter_table_with_cascade_result result; + ThriftHiveMetastore_add_primary_key_result result; try { - iface_->alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade); - } catch (InvalidOperationException &o1) { + iface_->add_primary_key(args.req); + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; } catch (MetaException &o2) { @@ -55839,11 +56422,11 @@ void ThriftHiveMetastoreProcessor::process_alter_table_with_cascade(int32_t seqi result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_table_with_cascade"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_primary_key"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("alter_table_with_cascade", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("add_primary_key", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55852,61 +56435,57 @@ void ThriftHiveMetastoreProcessor::process_alter_table_with_cascade(int32_t seqi } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_table_with_cascade"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_primary_key"); } - oprot->writeMessageBegin("alter_table_with_cascade", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("add_primary_key", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_table_with_cascade", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_primary_key", bytes); } } -void ThriftHiveMetastoreProcessor::process_add_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_add_foreign_key(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_partition", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_foreign_key", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, 
"ThriftHiveMetastore.add_partition"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_foreign_key"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_partition"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_foreign_key"); } - ThriftHiveMetastore_add_partition_args args; + ThriftHiveMetastore_add_foreign_key_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_partition", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_foreign_key", bytes); } - ThriftHiveMetastore_add_partition_result result; + ThriftHiveMetastore_add_foreign_key_result result; try { - iface_->add_partition(result.success, args.new_part); - result.__isset.success = true; - } catch (InvalidObjectException &o1) { + iface_->add_foreign_key(args.req); + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (AlreadyExistsException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_partition"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_foreign_key"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("add_partition", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("add_foreign_key", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55915,61 +56494,57 @@ void ThriftHiveMetastoreProcessor::process_add_partition(int32_t seqid, ::apache } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_partition"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_foreign_key"); } - oprot->writeMessageBegin("add_partition", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("add_foreign_key", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_partition", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_foreign_key", bytes); } } -void ThriftHiveMetastoreProcessor::process_add_partition_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_add_unique_constraint(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_partition_with_environment_context", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_unique_constraint", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_partition_with_environment_context"); + ::apache::thrift::TProcessorContextFreer 
freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_unique_constraint"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_partition_with_environment_context"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_unique_constraint"); } - ThriftHiveMetastore_add_partition_with_environment_context_args args; + ThriftHiveMetastore_add_unique_constraint_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_partition_with_environment_context", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_unique_constraint", bytes); } - ThriftHiveMetastore_add_partition_with_environment_context_result result; + ThriftHiveMetastore_add_unique_constraint_result result; try { - iface_->add_partition_with_environment_context(result.success, args.new_part, args.environment_context); - result.__isset.success = true; - } catch (InvalidObjectException &o1) { + iface_->add_unique_constraint(args.req); + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (AlreadyExistsException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_partition_with_environment_context"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_unique_constraint"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("add_partition_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("add_unique_constraint", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55978,61 +56553,57 @@ void ThriftHiveMetastoreProcessor::process_add_partition_with_environment_contex } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_partition_with_environment_context"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_unique_constraint"); } - oprot->writeMessageBegin("add_partition_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("add_unique_constraint", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_partition_with_environment_context", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_unique_constraint", bytes); } } -void ThriftHiveMetastoreProcessor::process_add_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_add_not_null_constraint(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_partitions", callContext); + ctx = 
this->eventHandler_->getContext("ThriftHiveMetastore.add_not_null_constraint", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_partitions"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_not_null_constraint"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_partitions"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_not_null_constraint"); } - ThriftHiveMetastore_add_partitions_args args; + ThriftHiveMetastore_add_not_null_constraint_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_partitions", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_not_null_constraint", bytes); } - ThriftHiveMetastore_add_partitions_result result; + ThriftHiveMetastore_add_not_null_constraint_result result; try { - result.success = iface_->add_partitions(args.new_parts); - result.__isset.success = true; - } catch (InvalidObjectException &o1) { + iface_->add_not_null_constraint(args.req); + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (AlreadyExistsException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_partitions"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_not_null_constraint"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("add_partitions", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("add_not_null_constraint", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56041,61 +56612,57 @@ void ThriftHiveMetastoreProcessor::process_add_partitions(int32_t seqid, ::apach } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_partitions"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_not_null_constraint"); } - oprot->writeMessageBegin("add_partitions", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("add_not_null_constraint", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_partitions", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_not_null_constraint", bytes); } } -void ThriftHiveMetastoreProcessor::process_add_partitions_pspec(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_drop_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_partitions_pspec", callContext); + ctx = 
this->eventHandler_->getContext("ThriftHiveMetastore.drop_table", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_partitions_pspec"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_table"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_partitions_pspec"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_table"); } - ThriftHiveMetastore_add_partitions_pspec_args args; + ThriftHiveMetastore_drop_table_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_partitions_pspec", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_table", bytes); } - ThriftHiveMetastore_add_partitions_pspec_result result; + ThriftHiveMetastore_drop_table_result result; try { - result.success = iface_->add_partitions_pspec(args.new_parts); - result.__isset.success = true; - } catch (InvalidObjectException &o1) { + iface_->drop_table(args.dbname, args.name, args.deleteData); + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (AlreadyExistsException &o2) { - result.o2 = o2; - result.__isset.o2 = true; } catch (MetaException &o3) { result.o3 = o3; result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_partitions_pspec"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_table"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("add_partitions_pspec", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("drop_table", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56104,61 +56671,57 @@ void ThriftHiveMetastoreProcessor::process_add_partitions_pspec(int32_t seqid, : } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_partitions_pspec"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_table"); } - oprot->writeMessageBegin("add_partitions_pspec", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("drop_table", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_partitions_pspec", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_table", bytes); } } -void ThriftHiveMetastoreProcessor::process_append_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_drop_table_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.append_partition", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_table_with_environment_context", callContext); } - 
::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.append_partition"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_table_with_environment_context"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.append_partition"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_table_with_environment_context"); } - ThriftHiveMetastore_append_partition_args args; + ThriftHiveMetastore_drop_table_with_environment_context_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.append_partition", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_table_with_environment_context", bytes); } - ThriftHiveMetastore_append_partition_result result; + ThriftHiveMetastore_drop_table_with_environment_context_result result; try { - iface_->append_partition(result.success, args.db_name, args.tbl_name, args.part_vals); - result.__isset.success = true; - } catch (InvalidObjectException &o1) { + iface_->drop_table_with_environment_context(args.dbname, args.name, args.deleteData, args.environment_context); + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (AlreadyExistsException &o2) { - result.o2 = o2; - result.__isset.o2 = true; } catch (MetaException &o3) { result.o3 = o3; result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.append_partition"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_table_with_environment_context"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("append_partition", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("drop_table_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56167,61 +56730,54 @@ void ThriftHiveMetastoreProcessor::process_append_partition(int32_t seqid, ::apa } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.append_partition"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_table_with_environment_context"); } - oprot->writeMessageBegin("append_partition", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("drop_table_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.append_partition", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_table_with_environment_context", bytes); } } -void ThriftHiveMetastoreProcessor::process_add_partitions_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_truncate_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = 
this->eventHandler_->getContext("ThriftHiveMetastore.add_partitions_req", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.truncate_table", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_partitions_req"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.truncate_table"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_partitions_req"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.truncate_table"); } - ThriftHiveMetastore_add_partitions_req_args args; + ThriftHiveMetastore_truncate_table_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_partitions_req", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.truncate_table", bytes); } - ThriftHiveMetastore_add_partitions_req_result result; + ThriftHiveMetastore_truncate_table_result result; try { - iface_->add_partitions_req(result.success, args.request); - result.__isset.success = true; - } catch (InvalidObjectException &o1) { + iface_->truncate_table(args.dbName, args.tableName, args.partNames); + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (AlreadyExistsException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_partitions_req"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.truncate_table"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("add_partitions_req", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("truncate_table", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56230,61 +56786,55 @@ void ThriftHiveMetastoreProcessor::process_add_partitions_req(int32_t seqid, ::a } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_partitions_req"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.truncate_table"); } - oprot->writeMessageBegin("add_partitions_req", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("truncate_table", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_partitions_req", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.truncate_table", bytes); } } -void ThriftHiveMetastoreProcessor::process_append_partition_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_tables(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.append_partition_with_environment_context", 
callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_tables", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.append_partition_with_environment_context"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_tables"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.append_partition_with_environment_context"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_tables"); } - ThriftHiveMetastore_append_partition_with_environment_context_args args; + ThriftHiveMetastore_get_tables_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.append_partition_with_environment_context", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_tables", bytes); } - ThriftHiveMetastore_append_partition_with_environment_context_result result; + ThriftHiveMetastore_get_tables_result result; try { - iface_->append_partition_with_environment_context(result.success, args.db_name, args.tbl_name, args.part_vals, args.environment_context); + iface_->get_tables(result.success, args.db_name, args.pattern); result.__isset.success = true; - } catch (InvalidObjectException &o1) { + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (AlreadyExistsException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.append_partition_with_environment_context"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_tables"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("append_partition_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_tables", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56293,61 +56843,55 @@ void ThriftHiveMetastoreProcessor::process_append_partition_with_environment_con } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.append_partition_with_environment_context"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_tables"); } - oprot->writeMessageBegin("append_partition_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_tables", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.append_partition_with_environment_context", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_tables", bytes); } } -void ThriftHiveMetastoreProcessor::process_append_partition_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_tables_by_type(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { 
void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.append_partition_by_name", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_tables_by_type", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.append_partition_by_name"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_tables_by_type"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.append_partition_by_name"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_tables_by_type"); } - ThriftHiveMetastore_append_partition_by_name_args args; + ThriftHiveMetastore_get_tables_by_type_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.append_partition_by_name", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_tables_by_type", bytes); } - ThriftHiveMetastore_append_partition_by_name_result result; + ThriftHiveMetastore_get_tables_by_type_result result; try { - iface_->append_partition_by_name(result.success, args.db_name, args.tbl_name, args.part_name); + iface_->get_tables_by_type(result.success, args.db_name, args.pattern, args.tableType); result.__isset.success = true; - } catch (InvalidObjectException &o1) { + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (AlreadyExistsException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.append_partition_by_name"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_tables_by_type"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("append_partition_by_name", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_tables_by_type", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56356,61 +56900,55 @@ void ThriftHiveMetastoreProcessor::process_append_partition_by_name(int32_t seqi } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.append_partition_by_name"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_tables_by_type"); } - oprot->writeMessageBegin("append_partition_by_name", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_tables_by_type", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.append_partition_by_name", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_tables_by_type", bytes); } } -void ThriftHiveMetastoreProcessor::process_append_partition_by_name_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_table_meta(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, 
::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.append_partition_by_name_with_environment_context", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table_meta", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.append_partition_by_name_with_environment_context"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table_meta"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.append_partition_by_name_with_environment_context"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table_meta"); } - ThriftHiveMetastore_append_partition_by_name_with_environment_context_args args; + ThriftHiveMetastore_get_table_meta_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.append_partition_by_name_with_environment_context", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table_meta", bytes); } - ThriftHiveMetastore_append_partition_by_name_with_environment_context_result result; + ThriftHiveMetastore_get_table_meta_result result; try { - iface_->append_partition_by_name_with_environment_context(result.success, args.db_name, args.tbl_name, args.part_name, args.environment_context); + iface_->get_table_meta(result.success, args.db_patterns, args.tbl_patterns, args.tbl_types); result.__isset.success = true; - } catch (InvalidObjectException &o1) { + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (AlreadyExistsException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.append_partition_by_name_with_environment_context"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table_meta"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("append_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_table_meta", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56419,58 +56957,55 @@ void ThriftHiveMetastoreProcessor::process_append_partition_by_name_with_environ } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.append_partition_by_name_with_environment_context"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table_meta"); } - oprot->writeMessageBegin("append_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_table_meta", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.append_partition_by_name_with_environment_context", bytes); + this->eventHandler_->postWrite(ctx, 
"ThriftHiveMetastore.get_table_meta", bytes); } } -void ThriftHiveMetastoreProcessor::process_drop_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_all_tables(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_partition", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_all_tables", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_partition"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_all_tables"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_partition"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_all_tables"); } - ThriftHiveMetastore_drop_partition_args args; + ThriftHiveMetastore_get_all_tables_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_partition", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_all_tables", bytes); } - ThriftHiveMetastore_drop_partition_result result; + ThriftHiveMetastore_get_all_tables_result result; try { - result.success = iface_->drop_partition(args.db_name, args.tbl_name, args.part_vals, args.deleteData); + iface_->get_all_tables(result.success, args.db_name); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { - result.o2 = o2; - result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_partition"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_all_tables"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("drop_partition", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_all_tables", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56479,58 +57014,58 @@ void ThriftHiveMetastoreProcessor::process_drop_partition(int32_t seqid, ::apach } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_partition"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_all_tables"); } - oprot->writeMessageBegin("drop_partition", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_all_tables", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_partition", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_all_tables", bytes); } } -void ThriftHiveMetastoreProcessor::process_drop_partition_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* 
oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_partition_with_environment_context", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_partition_with_environment_context"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_partition_with_environment_context"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table"); } - ThriftHiveMetastore_drop_partition_with_environment_context_args args; + ThriftHiveMetastore_get_table_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_partition_with_environment_context", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table", bytes); } - ThriftHiveMetastore_drop_partition_with_environment_context_result result; + ThriftHiveMetastore_get_table_result result; try { - result.success = iface_->drop_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.deleteData, args.environment_context); + iface_->get_table(result.success, args.dbname, args.tbl_name); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_partition_with_environment_context"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("drop_partition_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_table", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56539,58 +57074,52 @@ void ThriftHiveMetastoreProcessor::process_drop_partition_with_environment_conte } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_partition_with_environment_context"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table"); } - oprot->writeMessageBegin("drop_partition_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_table", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_partition_with_environment_context", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_table", bytes); } } -void ThriftHiveMetastoreProcessor::process_drop_partition_by_name(int32_t seqid, 
::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_table_objects_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_partition_by_name", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table_objects_by_name", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_partition_by_name"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table_objects_by_name"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_partition_by_name"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table_objects_by_name"); } - ThriftHiveMetastore_drop_partition_by_name_args args; + ThriftHiveMetastore_get_table_objects_by_name_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_partition_by_name", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table_objects_by_name", bytes); } - ThriftHiveMetastore_drop_partition_by_name_result result; + ThriftHiveMetastore_get_table_objects_by_name_result result; try { - result.success = iface_->drop_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.deleteData); + iface_->get_table_objects_by_name(result.success, args.dbname, args.tbl_names); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (MetaException &o2) { - result.o2 = o2; - result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_partition_by_name"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table_objects_by_name"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("drop_partition_by_name", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_table_objects_by_name", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56599,58 +57128,58 @@ void ThriftHiveMetastoreProcessor::process_drop_partition_by_name(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_partition_by_name"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table_objects_by_name"); } - oprot->writeMessageBegin("drop_partition_by_name", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_table_objects_by_name", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_partition_by_name", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_table_objects_by_name", bytes); } } -void 
ThriftHiveMetastoreProcessor::process_drop_partition_by_name_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_table_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_partition_by_name_with_environment_context", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table_req", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_partition_by_name_with_environment_context"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table_req"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_partition_by_name_with_environment_context"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table_req"); } - ThriftHiveMetastore_drop_partition_by_name_with_environment_context_args args; + ThriftHiveMetastore_get_table_req_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_partition_by_name_with_environment_context", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table_req", bytes); } - ThriftHiveMetastore_drop_partition_by_name_with_environment_context_result result; + ThriftHiveMetastore_get_table_req_result result; try { - result.success = iface_->drop_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.deleteData, args.environment_context); + iface_->get_table_req(result.success, args.req); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_partition_by_name_with_environment_context"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table_req"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("drop_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_table_req", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56659,58 +57188,61 @@ void ThriftHiveMetastoreProcessor::process_drop_partition_by_name_with_environme } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_partition_by_name_with_environment_context"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table_req"); } - oprot->writeMessageBegin("drop_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_table_req", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if 
(this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_partition_by_name_with_environment_context", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_table_req", bytes); } } -void ThriftHiveMetastoreProcessor::process_drop_partitions_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_table_objects_by_name_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_partitions_req", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table_objects_by_name_req", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_partitions_req"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table_objects_by_name_req"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_partitions_req"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table_objects_by_name_req"); } - ThriftHiveMetastore_drop_partitions_req_args args; + ThriftHiveMetastore_get_table_objects_by_name_req_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_partitions_req", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table_objects_by_name_req", bytes); } - ThriftHiveMetastore_drop_partitions_req_result result; + ThriftHiveMetastore_get_table_objects_by_name_req_result result; try { - iface_->drop_partitions_req(result.success, args.req); + iface_->get_table_objects_by_name_req(result.success, args.req); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (InvalidOperationException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (UnknownDBException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_partitions_req"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table_objects_by_name_req"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("drop_partitions_req", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_table_objects_by_name_req", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56719,58 +57251,61 @@ void ThriftHiveMetastoreProcessor::process_drop_partitions_req(int32_t seqid, :: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_partitions_req"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table_objects_by_name_req"); } - oprot->writeMessageBegin("drop_partitions_req", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_table_objects_by_name_req", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); 
oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_partitions_req", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_table_objects_by_name_req", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_table_names_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partition", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table_names_by_filter", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partition"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table_names_by_filter"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partition"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table_names_by_filter"); } - ThriftHiveMetastore_get_partition_args args; + ThriftHiveMetastore_get_table_names_by_filter_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partition", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table_names_by_filter", bytes); } - ThriftHiveMetastore_get_partition_result result; + ThriftHiveMetastore_get_table_names_by_filter_result result; try { - iface_->get_partition(result.success, args.db_name, args.tbl_name, args.part_vals); + iface_->get_table_names_by_filter(result.success, args.dbname, args.filter, args.max_tables); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (InvalidOperationException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (UnknownDBException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partition"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table_names_by_filter"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_partition", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_table_names_by_filter", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56779,64 +57314,57 @@ void ThriftHiveMetastoreProcessor::process_get_partition(int32_t seqid, ::apache } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partition"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table_names_by_filter"); } - oprot->writeMessageBegin("get_partition", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_table_names_by_filter", ::apache::thrift::protocol::T_REPLY, seqid); 
result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partition", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_table_names_by_filter", bytes); } } -void ThriftHiveMetastoreProcessor::process_exchange_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_alter_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.exchange_partition", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_table", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.exchange_partition"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_table"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.exchange_partition"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_table"); } - ThriftHiveMetastore_exchange_partition_args args; + ThriftHiveMetastore_alter_table_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.exchange_partition", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_table", bytes); } - ThriftHiveMetastore_exchange_partition_result result; + ThriftHiveMetastore_alter_table_result result; try { - iface_->exchange_partition(result.success, args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name); - result.__isset.success = true; - } catch (MetaException &o1) { + iface_->alter_table(args.dbname, args.tbl_name, args.new_tbl); + } catch (InvalidOperationException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (InvalidObjectException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (InvalidInputException &o4) { - result.o4 = o4; - result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.exchange_partition"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_table"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("exchange_partition", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("alter_table", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56845,64 +57373,57 @@ void ThriftHiveMetastoreProcessor::process_exchange_partition(int32_t seqid, ::a } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.exchange_partition"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_table"); } - oprot->writeMessageBegin("exchange_partition", ::apache::thrift::protocol::T_REPLY, seqid); + 
oprot->writeMessageBegin("alter_table", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.exchange_partition", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_table", bytes); } } -void ThriftHiveMetastoreProcessor::process_exchange_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_alter_table_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.exchange_partitions", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_table_with_environment_context", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.exchange_partitions"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_table_with_environment_context"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.exchange_partitions"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_table_with_environment_context"); } - ThriftHiveMetastore_exchange_partitions_args args; + ThriftHiveMetastore_alter_table_with_environment_context_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.exchange_partitions", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_table_with_environment_context", bytes); } - ThriftHiveMetastore_exchange_partitions_result result; + ThriftHiveMetastore_alter_table_with_environment_context_result result; try { - iface_->exchange_partitions(result.success, args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name); - result.__isset.success = true; - } catch (MetaException &o1) { + iface_->alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context); + } catch (InvalidOperationException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (InvalidObjectException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (InvalidInputException &o4) { - result.o4 = o4; - result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.exchange_partitions"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_table_with_environment_context"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("exchange_partitions", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("alter_table_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56911,58 +57432,57 @@ void 
ThriftHiveMetastoreProcessor::process_exchange_partitions(int32_t seqid, :: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.exchange_partitions"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_table_with_environment_context"); } - oprot->writeMessageBegin("exchange_partitions", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("alter_table_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.exchange_partitions", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_table_with_environment_context", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partition_with_auth(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_alter_table_with_cascade(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partition_with_auth", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_table_with_cascade", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partition_with_auth"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_table_with_cascade"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partition_with_auth"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_table_with_cascade"); } - ThriftHiveMetastore_get_partition_with_auth_args args; + ThriftHiveMetastore_alter_table_with_cascade_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partition_with_auth", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_table_with_cascade", bytes); } - ThriftHiveMetastore_get_partition_with_auth_result result; + ThriftHiveMetastore_alter_table_with_cascade_result result; try { - iface_->get_partition_with_auth(result.success, args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names); - result.__isset.success = true; - } catch (MetaException &o1) { + iface_->alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade); + } catch (InvalidOperationException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partition_with_auth"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_table_with_cascade"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_partition_with_auth", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("alter_table_with_cascade", 
::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -56971,58 +57491,61 @@ void ThriftHiveMetastoreProcessor::process_get_partition_with_auth(int32_t seqid } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partition_with_auth"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_table_with_cascade"); } - oprot->writeMessageBegin("get_partition_with_auth", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("alter_table_with_cascade", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partition_with_auth", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_table_with_cascade", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partition_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_add_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partition_by_name", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_partition", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partition_by_name"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_partition"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partition_by_name"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_partition"); } - ThriftHiveMetastore_get_partition_by_name_args args; + ThriftHiveMetastore_add_partition_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partition_by_name", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_partition", bytes); } - ThriftHiveMetastore_get_partition_by_name_result result; + ThriftHiveMetastore_add_partition_result result; try { - iface_->get_partition_by_name(result.success, args.db_name, args.tbl_name, args.part_name); + iface_->add_partition(result.success, args.new_part); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (InvalidObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (AlreadyExistsException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partition_by_name"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_partition"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_partition_by_name", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("add_partition", 
::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57031,58 +57554,61 @@ void ThriftHiveMetastoreProcessor::process_get_partition_by_name(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partition_by_name"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_partition"); } - oprot->writeMessageBegin("get_partition_by_name", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("add_partition", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partition_by_name", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_partition", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_add_partition_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_partition_with_environment_context", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_partition_with_environment_context"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_partition_with_environment_context"); } - ThriftHiveMetastore_get_partitions_args args; + ThriftHiveMetastore_add_partition_with_environment_context_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_partition_with_environment_context", bytes); } - ThriftHiveMetastore_get_partitions_result result; + ThriftHiveMetastore_add_partition_with_environment_context_result result; try { - iface_->get_partitions(result.success, args.db_name, args.tbl_name, args.max_parts); + iface_->add_partition_with_environment_context(result.success, args.new_part, args.environment_context); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (InvalidObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (AlreadyExistsException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_partition_with_environment_context"); } ::apache::thrift::TApplicationException x(e.what()); - 
oprot->writeMessageBegin("get_partitions", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("add_partition_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57091,58 +57617,61 @@ void ThriftHiveMetastoreProcessor::process_get_partitions(int32_t seqid, ::apach } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_partition_with_environment_context"); } - oprot->writeMessageBegin("get_partitions", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("add_partition_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_partition_with_environment_context", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partitions_with_auth(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_add_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_with_auth", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_partitions", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_with_auth"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_partitions"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_with_auth"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_partitions"); } - ThriftHiveMetastore_get_partitions_with_auth_args args; + ThriftHiveMetastore_add_partitions_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_with_auth", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_partitions", bytes); } - ThriftHiveMetastore_get_partitions_with_auth_result result; + ThriftHiveMetastore_add_partitions_result result; try { - iface_->get_partitions_with_auth(result.success, args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names); + result.success = iface_->add_partitions(args.new_parts); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (InvalidObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (AlreadyExistsException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_with_auth"); + this->eventHandler_->handlerError(ctx, 
"ThriftHiveMetastore.add_partitions"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_partitions_with_auth", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("add_partitions", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57151,58 +57680,61 @@ void ThriftHiveMetastoreProcessor::process_get_partitions_with_auth(int32_t seqi } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions_with_auth"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_partitions"); } - oprot->writeMessageBegin("get_partitions_with_auth", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("add_partitions", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_with_auth", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_partitions", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partitions_pspec(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_add_partitions_pspec(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_pspec", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_partitions_pspec", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_pspec"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_partitions_pspec"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_pspec"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_partitions_pspec"); } - ThriftHiveMetastore_get_partitions_pspec_args args; + ThriftHiveMetastore_add_partitions_pspec_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_pspec", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_partitions_pspec", bytes); } - ThriftHiveMetastore_get_partitions_pspec_result result; + ThriftHiveMetastore_add_partitions_pspec_result result; try { - iface_->get_partitions_pspec(result.success, args.db_name, args.tbl_name, args.max_parts); + result.success = iface_->add_partitions_pspec(args.new_parts); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (InvalidObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (AlreadyExistsException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_pspec"); + 
this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_partitions_pspec"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_partitions_pspec", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("add_partitions_pspec", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57211,58 +57743,61 @@ void ThriftHiveMetastoreProcessor::process_get_partitions_pspec(int32_t seqid, : } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions_pspec"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_partitions_pspec"); } - oprot->writeMessageBegin("get_partitions_pspec", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("add_partitions_pspec", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_pspec", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_partitions_pspec", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partition_names(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_append_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partition_names", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.append_partition", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partition_names"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.append_partition"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partition_names"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.append_partition"); } - ThriftHiveMetastore_get_partition_names_args args; + ThriftHiveMetastore_append_partition_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partition_names", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.append_partition", bytes); } - ThriftHiveMetastore_get_partition_names_result result; + ThriftHiveMetastore_append_partition_result result; try { - iface_->get_partition_names(result.success, args.db_name, args.tbl_name, args.max_parts); + iface_->append_partition(result.success, args.db_name, args.tbl_name, args.part_vals); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (InvalidObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (AlreadyExistsException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, 
"ThriftHiveMetastore.get_partition_names"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.append_partition"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_partition_names", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("append_partition", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57271,58 +57806,61 @@ void ThriftHiveMetastoreProcessor::process_get_partition_names(int32_t seqid, :: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partition_names"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.append_partition"); } - oprot->writeMessageBegin("get_partition_names", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("append_partition", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partition_names", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.append_partition", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partition_values(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_add_partitions_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partition_values", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_partitions_req", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partition_values"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_partitions_req"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partition_values"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_partitions_req"); } - ThriftHiveMetastore_get_partition_values_args args; + ThriftHiveMetastore_add_partitions_req_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partition_values", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_partitions_req", bytes); } - ThriftHiveMetastore_get_partition_values_result result; + ThriftHiveMetastore_add_partitions_req_result result; try { - iface_->get_partition_values(result.success, args.request); + iface_->add_partitions_req(result.success, args.request); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (InvalidObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (AlreadyExistsException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partition_values"); 
+ this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_partitions_req"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_partition_values", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("add_partitions_req", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57331,58 +57869,61 @@ void ThriftHiveMetastoreProcessor::process_get_partition_values(int32_t seqid, : } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partition_values"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_partitions_req"); } - oprot->writeMessageBegin("get_partition_values", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("add_partitions_req", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partition_values", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_partitions_req", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partitions_ps(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_append_partition_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_ps", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.append_partition_with_environment_context", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_ps"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.append_partition_with_environment_context"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_ps"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.append_partition_with_environment_context"); } - ThriftHiveMetastore_get_partitions_ps_args args; + ThriftHiveMetastore_append_partition_with_environment_context_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_ps", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.append_partition_with_environment_context", bytes); } - ThriftHiveMetastore_get_partitions_ps_result result; + ThriftHiveMetastore_append_partition_with_environment_context_result result; try { - iface_->get_partitions_ps(result.success, args.db_name, args.tbl_name, args.part_vals, args.max_parts); + iface_->append_partition_with_environment_context(result.success, args.db_name, args.tbl_name, args.part_vals, args.environment_context); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (InvalidObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (AlreadyExistsException &o2) { result.o2 = o2; result.__isset.o2 = true; + } 
catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_ps"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.append_partition_with_environment_context"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_partitions_ps", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("append_partition_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57391,58 +57932,61 @@ void ThriftHiveMetastoreProcessor::process_get_partitions_ps(int32_t seqid, ::ap } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions_ps"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.append_partition_with_environment_context"); } - oprot->writeMessageBegin("get_partitions_ps", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("append_partition_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_ps", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.append_partition_with_environment_context", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partitions_ps_with_auth(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_append_partition_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_ps_with_auth", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.append_partition_by_name", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_ps_with_auth"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.append_partition_by_name"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_ps_with_auth"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.append_partition_by_name"); } - ThriftHiveMetastore_get_partitions_ps_with_auth_args args; + ThriftHiveMetastore_append_partition_by_name_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_ps_with_auth", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.append_partition_by_name", bytes); } - ThriftHiveMetastore_get_partitions_ps_with_auth_result result; + ThriftHiveMetastore_append_partition_by_name_result result; try { - iface_->get_partitions_ps_with_auth(result.success, args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names); + iface_->append_partition_by_name(result.success, args.db_name, args.tbl_name, 
args.part_name); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (InvalidObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (AlreadyExistsException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_ps_with_auth"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.append_partition_by_name"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_partitions_ps_with_auth", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("append_partition_by_name", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57451,58 +57995,61 @@ void ThriftHiveMetastoreProcessor::process_get_partitions_ps_with_auth(int32_t s } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions_ps_with_auth"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.append_partition_by_name"); } - oprot->writeMessageBegin("get_partitions_ps_with_auth", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("append_partition_by_name", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_ps_with_auth", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.append_partition_by_name", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partition_names_ps(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_append_partition_by_name_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partition_names_ps", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.append_partition_by_name_with_environment_context", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partition_names_ps"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.append_partition_by_name_with_environment_context"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partition_names_ps"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.append_partition_by_name_with_environment_context"); } - ThriftHiveMetastore_get_partition_names_ps_args args; + ThriftHiveMetastore_append_partition_by_name_with_environment_context_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partition_names_ps", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.append_partition_by_name_with_environment_context", 
bytes); } - ThriftHiveMetastore_get_partition_names_ps_result result; + ThriftHiveMetastore_append_partition_by_name_with_environment_context_result result; try { - iface_->get_partition_names_ps(result.success, args.db_name, args.tbl_name, args.part_vals, args.max_parts); + iface_->append_partition_by_name_with_environment_context(result.success, args.db_name, args.tbl_name, args.part_name, args.environment_context); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (InvalidObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (AlreadyExistsException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partition_names_ps"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.append_partition_by_name_with_environment_context"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_partition_names_ps", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("append_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57511,58 +58058,58 @@ void ThriftHiveMetastoreProcessor::process_get_partition_names_ps(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partition_names_ps"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.append_partition_by_name_with_environment_context"); } - oprot->writeMessageBegin("get_partition_names_ps", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("append_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partition_names_ps", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.append_partition_by_name_with_environment_context", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partitions_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_drop_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_by_filter", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_partition", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_by_filter"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_partition"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_by_filter"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_partition"); } - ThriftHiveMetastore_get_partitions_by_filter_args args; + ThriftHiveMetastore_drop_partition_args 
args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_by_filter", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_partition", bytes); } - ThriftHiveMetastore_get_partitions_by_filter_result result; + ThriftHiveMetastore_drop_partition_result result; try { - iface_->get_partitions_by_filter(result.success, args.db_name, args.tbl_name, args.filter, args.max_parts); + result.success = iface_->drop_partition(args.db_name, args.tbl_name, args.part_vals, args.deleteData); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_by_filter"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_partition"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_partitions_by_filter", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("drop_partition", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57571,58 +58118,58 @@ void ThriftHiveMetastoreProcessor::process_get_partitions_by_filter(int32_t seqi } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions_by_filter"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_partition"); } - oprot->writeMessageBegin("get_partitions_by_filter", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("drop_partition", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_by_filter", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_partition", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_part_specs_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_drop_partition_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_part_specs_by_filter", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_partition_with_environment_context", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_part_specs_by_filter"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_partition_with_environment_context"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_part_specs_by_filter"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_partition_with_environment_context"); } - 
ThriftHiveMetastore_get_part_specs_by_filter_args args; + ThriftHiveMetastore_drop_partition_with_environment_context_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_part_specs_by_filter", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_partition_with_environment_context", bytes); } - ThriftHiveMetastore_get_part_specs_by_filter_result result; + ThriftHiveMetastore_drop_partition_with_environment_context_result result; try { - iface_->get_part_specs_by_filter(result.success, args.db_name, args.tbl_name, args.filter, args.max_parts); + result.success = iface_->drop_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.deleteData, args.environment_context); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_part_specs_by_filter"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_partition_with_environment_context"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_part_specs_by_filter", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("drop_partition_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57631,58 +58178,58 @@ void ThriftHiveMetastoreProcessor::process_get_part_specs_by_filter(int32_t seqi } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_part_specs_by_filter"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_partition_with_environment_context"); } - oprot->writeMessageBegin("get_part_specs_by_filter", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("drop_partition_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_part_specs_by_filter", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_partition_with_environment_context", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partitions_by_expr(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_drop_partition_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_by_expr", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_partition_by_name", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_by_expr"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, 
"ThriftHiveMetastore.drop_partition_by_name"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_by_expr"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_partition_by_name"); } - ThriftHiveMetastore_get_partitions_by_expr_args args; + ThriftHiveMetastore_drop_partition_by_name_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_by_expr", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_partition_by_name", bytes); } - ThriftHiveMetastore_get_partitions_by_expr_result result; + ThriftHiveMetastore_drop_partition_by_name_result result; try { - iface_->get_partitions_by_expr(result.success, args.req); + result.success = iface_->drop_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.deleteData); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_by_expr"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_partition_by_name"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_partitions_by_expr", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("drop_partition_by_name", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57691,58 +58238,58 @@ void ThriftHiveMetastoreProcessor::process_get_partitions_by_expr(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions_by_expr"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_partition_by_name"); } - oprot->writeMessageBegin("get_partitions_by_expr", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("drop_partition_by_name", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_by_expr", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_partition_by_name", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_num_partitions_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_drop_partition_by_name_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_num_partitions_by_filter", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_partition_by_name_with_environment_context", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_num_partitions_by_filter"); + 
::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_partition_by_name_with_environment_context"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_num_partitions_by_filter"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_partition_by_name_with_environment_context"); } - ThriftHiveMetastore_get_num_partitions_by_filter_args args; + ThriftHiveMetastore_drop_partition_by_name_with_environment_context_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_num_partitions_by_filter", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_partition_by_name_with_environment_context", bytes); } - ThriftHiveMetastore_get_num_partitions_by_filter_result result; + ThriftHiveMetastore_drop_partition_by_name_with_environment_context_result result; try { - result.success = iface_->get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter); + result.success = iface_->drop_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.deleteData, args.environment_context); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_num_partitions_by_filter"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_partition_by_name_with_environment_context"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_num_partitions_by_filter", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("drop_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57751,58 +58298,58 @@ void ThriftHiveMetastoreProcessor::process_get_num_partitions_by_filter(int32_t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_num_partitions_by_filter"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_partition_by_name_with_environment_context"); } - oprot->writeMessageBegin("get_num_partitions_by_filter", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("drop_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_num_partitions_by_filter", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_partition_by_name_with_environment_context", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partitions_by_names(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_drop_partitions_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* 
callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_by_names", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_partitions_req", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_by_names"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_partitions_req"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_by_names"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_partitions_req"); } - ThriftHiveMetastore_get_partitions_by_names_args args; + ThriftHiveMetastore_drop_partitions_req_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_by_names", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_partitions_req", bytes); } - ThriftHiveMetastore_get_partitions_by_names_result result; + ThriftHiveMetastore_drop_partitions_req_result result; try { - iface_->get_partitions_by_names(result.success, args.db_name, args.tbl_name, args.names); + iface_->drop_partitions_req(result.success, args.req); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_by_names"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_partitions_req"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_partitions_by_names", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("drop_partitions_req", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57811,57 +58358,58 @@ void ThriftHiveMetastoreProcessor::process_get_partitions_by_names(int32_t seqid } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions_by_names"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_partitions_req"); } - oprot->writeMessageBegin("get_partitions_by_names", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("drop_partitions_req", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_by_names", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_partitions_req", bytes); } } -void ThriftHiveMetastoreProcessor::process_alter_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if 
(this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_partition", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partition", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_partition"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partition"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_partition"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partition"); } - ThriftHiveMetastore_alter_partition_args args; + ThriftHiveMetastore_get_partition_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_partition", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partition", bytes); } - ThriftHiveMetastore_alter_partition_result result; + ThriftHiveMetastore_get_partition_result result; try { - iface_->alter_partition(args.db_name, args.tbl_name, args.new_part); - } catch (InvalidOperationException &o1) { + iface_->get_partition(result.success, args.db_name, args.tbl_name, args.part_vals); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_partition"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partition"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("alter_partition", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partition", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57870,57 +58418,64 @@ void ThriftHiveMetastoreProcessor::process_alter_partition(int32_t seqid, ::apac } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_partition"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partition"); } - oprot->writeMessageBegin("alter_partition", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partition", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_partition", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partition", bytes); } } -void ThriftHiveMetastoreProcessor::process_alter_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_exchange_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_partitions", callContext); + ctx = 
this->eventHandler_->getContext("ThriftHiveMetastore.exchange_partition", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_partitions"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.exchange_partition"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_partitions"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.exchange_partition"); } - ThriftHiveMetastore_alter_partitions_args args; + ThriftHiveMetastore_exchange_partition_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_partitions", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.exchange_partition", bytes); } - ThriftHiveMetastore_alter_partitions_result result; + ThriftHiveMetastore_exchange_partition_result result; try { - iface_->alter_partitions(args.db_name, args.tbl_name, args.new_parts); - } catch (InvalidOperationException &o1) { + iface_->exchange_partition(result.success, args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (InvalidObjectException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (InvalidInputException &o4) { + result.o4 = o4; + result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_partitions"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.exchange_partition"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("alter_partitions", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("exchange_partition", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57929,57 +58484,64 @@ void ThriftHiveMetastoreProcessor::process_alter_partitions(int32_t seqid, ::apa } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_partitions"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.exchange_partition"); } - oprot->writeMessageBegin("alter_partitions", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("exchange_partition", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_partitions", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.exchange_partition", bytes); } } -void ThriftHiveMetastoreProcessor::process_alter_partitions_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_exchange_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if 
(this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_partitions_with_environment_context", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.exchange_partitions", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.exchange_partitions"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.exchange_partitions"); } - ThriftHiveMetastore_alter_partitions_with_environment_context_args args; + ThriftHiveMetastore_exchange_partitions_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.exchange_partitions", bytes); } - ThriftHiveMetastore_alter_partitions_with_environment_context_result result; + ThriftHiveMetastore_exchange_partitions_result result; try { - iface_->alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context); - } catch (InvalidOperationException &o1) { + iface_->exchange_partitions(result.success, args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (InvalidObjectException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (InvalidInputException &o4) { + result.o4 = o4; + result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.exchange_partitions"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("exchange_partitions", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -57988,57 +58550,58 @@ void ThriftHiveMetastoreProcessor::process_alter_partitions_with_environment_con } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.exchange_partitions"); } - oprot->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("exchange_partitions", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context", bytes); + this->eventHandler_->postWrite(ctx, 
"ThriftHiveMetastore.exchange_partitions", bytes); } } -void ThriftHiveMetastoreProcessor::process_alter_partition_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partition_with_auth(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_partition_with_environment_context", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partition_with_auth", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_partition_with_environment_context"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partition_with_auth"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_partition_with_environment_context"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partition_with_auth"); } - ThriftHiveMetastore_alter_partition_with_environment_context_args args; + ThriftHiveMetastore_get_partition_with_auth_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_partition_with_environment_context", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partition_with_auth", bytes); } - ThriftHiveMetastore_alter_partition_with_environment_context_result result; + ThriftHiveMetastore_get_partition_with_auth_result result; try { - iface_->alter_partition_with_environment_context(args.db_name, args.tbl_name, args.new_part, args.environment_context); - } catch (InvalidOperationException &o1) { + iface_->get_partition_with_auth(result.success, args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_partition_with_environment_context"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partition_with_auth"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("alter_partition_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partition_with_auth", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -58047,57 +58610,58 @@ void ThriftHiveMetastoreProcessor::process_alter_partition_with_environment_cont } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_partition_with_environment_context"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partition_with_auth"); } - oprot->writeMessageBegin("alter_partition_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partition_with_auth", ::apache::thrift::protocol::T_REPLY, 
seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_partition_with_environment_context", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partition_with_auth", bytes); } } -void ThriftHiveMetastoreProcessor::process_rename_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partition_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.rename_partition", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partition_by_name", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.rename_partition"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partition_by_name"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.rename_partition"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partition_by_name"); } - ThriftHiveMetastore_rename_partition_args args; + ThriftHiveMetastore_get_partition_by_name_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.rename_partition", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partition_by_name", bytes); } - ThriftHiveMetastore_rename_partition_result result; + ThriftHiveMetastore_get_partition_by_name_result result; try { - iface_->rename_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part); - } catch (InvalidOperationException &o1) { + iface_->get_partition_by_name(result.success, args.db_name, args.tbl_name, args.part_name); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.rename_partition"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partition_by_name"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("rename_partition", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partition_by_name", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -58106,55 +58670,58 @@ void ThriftHiveMetastoreProcessor::process_rename_partition(int32_t seqid, ::apa } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.rename_partition"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partition_by_name"); } - oprot->writeMessageBegin("rename_partition", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partition_by_name", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); 
oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.rename_partition", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partition_by_name", bytes); } } -void ThriftHiveMetastoreProcessor::process_partition_name_has_valid_characters(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.partition_name_has_valid_characters", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.partition_name_has_valid_characters"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.partition_name_has_valid_characters"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions"); } - ThriftHiveMetastore_partition_name_has_valid_characters_args args; + ThriftHiveMetastore_get_partitions_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.partition_name_has_valid_characters", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions", bytes); } - ThriftHiveMetastore_partition_name_has_valid_characters_result result; + ThriftHiveMetastore_get_partitions_result result; try { - result.success = iface_->partition_name_has_valid_characters(args.part_vals, args.throw_exception); + iface_->get_partitions(result.success, args.db_name, args.tbl_name, args.max_parts); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.partition_name_has_valid_characters"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("partition_name_has_valid_characters", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partitions", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -58163,55 +58730,58 @@ void ThriftHiveMetastoreProcessor::process_partition_name_has_valid_characters(i } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.partition_name_has_valid_characters"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions"); } - oprot->writeMessageBegin("partition_name_has_valid_characters", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partitions", 
::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.partition_name_has_valid_characters", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_config_value(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partitions_with_auth(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_config_value", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_with_auth", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_config_value"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_with_auth"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_config_value"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_with_auth"); } - ThriftHiveMetastore_get_config_value_args args; + ThriftHiveMetastore_get_partitions_with_auth_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_config_value", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_with_auth", bytes); } - ThriftHiveMetastore_get_config_value_result result; + ThriftHiveMetastore_get_partitions_with_auth_result result; try { - iface_->get_config_value(result.success, args.name, args.defaultValue); + iface_->get_partitions_with_auth(result.success, args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names); result.__isset.success = true; - } catch (ConfigValSecurityException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_config_value"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_with_auth"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_config_value", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partitions_with_auth", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -58220,55 +58790,58 @@ void ThriftHiveMetastoreProcessor::process_get_config_value(int32_t seqid, ::apa } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_config_value"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions_with_auth"); } - oprot->writeMessageBegin("get_config_value", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partitions_with_auth", 
::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_config_value", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_with_auth", bytes); } } -void ThriftHiveMetastoreProcessor::process_partition_name_to_vals(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partitions_pspec(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.partition_name_to_vals", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_pspec", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.partition_name_to_vals"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_pspec"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.partition_name_to_vals"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_pspec"); } - ThriftHiveMetastore_partition_name_to_vals_args args; + ThriftHiveMetastore_get_partitions_pspec_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.partition_name_to_vals", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_pspec", bytes); } - ThriftHiveMetastore_partition_name_to_vals_result result; + ThriftHiveMetastore_get_partitions_pspec_result result; try { - iface_->partition_name_to_vals(result.success, args.part_name); + iface_->get_partitions_pspec(result.success, args.db_name, args.tbl_name, args.max_parts); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.partition_name_to_vals"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_pspec"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("partition_name_to_vals", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partitions_pspec", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -58277,55 +58850,58 @@ void ThriftHiveMetastoreProcessor::process_partition_name_to_vals(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.partition_name_to_vals"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions_pspec"); } - oprot->writeMessageBegin("partition_name_to_vals", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partitions_pspec", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); 
oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.partition_name_to_vals", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_pspec", bytes); } } -void ThriftHiveMetastoreProcessor::process_partition_name_to_spec(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partition_names(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.partition_name_to_spec", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partition_names", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.partition_name_to_spec"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partition_names"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.partition_name_to_spec"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partition_names"); } - ThriftHiveMetastore_partition_name_to_spec_args args; + ThriftHiveMetastore_get_partition_names_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.partition_name_to_spec", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partition_names", bytes); } - ThriftHiveMetastore_partition_name_to_spec_result result; + ThriftHiveMetastore_get_partition_names_result result; try { - iface_->partition_name_to_spec(result.success, args.part_name); + iface_->get_partition_names(result.success, args.db_name, args.tbl_name, args.max_parts); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.partition_name_to_spec"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partition_names"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("partition_name_to_spec", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partition_names", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -58334,69 +58910,58 @@ void ThriftHiveMetastoreProcessor::process_partition_name_to_spec(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.partition_name_to_spec"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partition_names"); } - oprot->writeMessageBegin("partition_name_to_spec", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partition_names", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); 
oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.partition_name_to_spec", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partition_names", bytes); } } -void ThriftHiveMetastoreProcessor::process_markPartitionForEvent(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partition_values(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.markPartitionForEvent", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partition_values", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.markPartitionForEvent"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partition_values"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.markPartitionForEvent"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partition_values"); } - ThriftHiveMetastore_markPartitionForEvent_args args; + ThriftHiveMetastore_get_partition_values_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.markPartitionForEvent", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partition_values", bytes); } - ThriftHiveMetastore_markPartitionForEvent_result result; + ThriftHiveMetastore_get_partition_values_result result; try { - iface_->markPartitionForEvent(args.db_name, args.tbl_name, args.part_vals, args.eventType); + iface_->get_partition_values(result.success, args.request); + result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (UnknownDBException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (UnknownTableException &o4) { - result.o4 = o4; - result.__isset.o4 = true; - } catch (UnknownPartitionException &o5) { - result.o5 = o5; - result.__isset.o5 = true; - } catch (InvalidPartitionException &o6) { - result.o6 = o6; - result.__isset.o6 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.markPartitionForEvent"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partition_values"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("markPartitionForEvent", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partition_values", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -58405,44 +58970,44 @@ void ThriftHiveMetastoreProcessor::process_markPartitionForEvent(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.markPartitionForEvent"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partition_values"); } - oprot->writeMessageBegin("markPartitionForEvent", 
::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partition_values", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.markPartitionForEvent", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partition_values", bytes); } } -void ThriftHiveMetastoreProcessor::process_isPartitionMarkedForEvent(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partitions_ps(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.isPartitionMarkedForEvent", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_ps", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.isPartitionMarkedForEvent"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_ps"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.isPartitionMarkedForEvent"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_ps"); } - ThriftHiveMetastore_isPartitionMarkedForEvent_args args; + ThriftHiveMetastore_get_partitions_ps_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.isPartitionMarkedForEvent", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_ps", bytes); } - ThriftHiveMetastore_isPartitionMarkedForEvent_result result; + ThriftHiveMetastore_get_partitions_ps_result result; try { - result.success = iface_->isPartitionMarkedForEvent(args.db_name, args.tbl_name, args.part_vals, args.eventType); + iface_->get_partitions_ps(result.success, args.db_name, args.tbl_name, args.part_vals, args.max_parts); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; @@ -58450,25 +59015,13 @@ void ThriftHiveMetastoreProcessor::process_isPartitionMarkedForEvent(int32_t seq } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (UnknownDBException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (UnknownTableException &o4) { - result.o4 = o4; - result.__isset.o4 = true; - } catch (UnknownPartitionException &o5) { - result.o5 = o5; - result.__isset.o5 = true; - } catch (InvalidPartitionException &o6) { - result.o6 = o6; - result.__isset.o6 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.isPartitionMarkedForEvent"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_ps"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("isPartitionMarkedForEvent", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partitions_ps", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); 
oprot->getTransport()->writeEnd(); @@ -58477,61 +59030,58 @@ void ThriftHiveMetastoreProcessor::process_isPartitionMarkedForEvent(int32_t seq } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.isPartitionMarkedForEvent"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions_ps"); } - oprot->writeMessageBegin("isPartitionMarkedForEvent", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partitions_ps", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.isPartitionMarkedForEvent", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_ps", bytes); } } -void ThriftHiveMetastoreProcessor::process_add_index(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partitions_ps_with_auth(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_index", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_ps_with_auth", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_index"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_ps_with_auth"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_index"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_ps_with_auth"); } - ThriftHiveMetastore_add_index_args args; + ThriftHiveMetastore_get_partitions_ps_with_auth_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_index", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_ps_with_auth", bytes); } - ThriftHiveMetastore_add_index_result result; + ThriftHiveMetastore_get_partitions_ps_with_auth_result result; try { - iface_->add_index(result.success, args.new_index, args.index_table); + iface_->get_partitions_ps_with_auth(result.success, args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names); result.__isset.success = true; - } catch (InvalidObjectException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (AlreadyExistsException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_index"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_ps_with_auth"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("add_index", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partitions_ps_with_auth", 
::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -58540,57 +59090,58 @@ void ThriftHiveMetastoreProcessor::process_add_index(int32_t seqid, ::apache::th } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_index"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions_ps_with_auth"); } - oprot->writeMessageBegin("add_index", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partitions_ps_with_auth", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_index", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_ps_with_auth", bytes); } } -void ThriftHiveMetastoreProcessor::process_alter_index(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partition_names_ps(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_index", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partition_names_ps", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_index"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partition_names_ps"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_index"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partition_names_ps"); } - ThriftHiveMetastore_alter_index_args args; + ThriftHiveMetastore_get_partition_names_ps_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_index", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partition_names_ps", bytes); } - ThriftHiveMetastore_alter_index_result result; + ThriftHiveMetastore_get_partition_names_ps_result result; try { - iface_->alter_index(args.dbname, args.base_tbl_name, args.idx_name, args.new_idx); - } catch (InvalidOperationException &o1) { + iface_->get_partition_names_ps(result.success, args.db_name, args.tbl_name, args.part_vals, args.max_parts); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_index"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partition_names_ps"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("alter_index", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partition_names_ps", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); 
oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -58599,58 +59150,58 @@ void ThriftHiveMetastoreProcessor::process_alter_index(int32_t seqid, ::apache:: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_index"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partition_names_ps"); } - oprot->writeMessageBegin("alter_index", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partition_names_ps", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_index", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partition_names_ps", bytes); } } -void ThriftHiveMetastoreProcessor::process_drop_index_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partitions_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_index_by_name", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_by_filter", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_index_by_name"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_by_filter"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_index_by_name"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_by_filter"); } - ThriftHiveMetastore_drop_index_by_name_args args; + ThriftHiveMetastore_get_partitions_by_filter_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_index_by_name", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_by_filter", bytes); } - ThriftHiveMetastore_drop_index_by_name_result result; + ThriftHiveMetastore_get_partitions_by_filter_result result; try { - result.success = iface_->drop_index_by_name(args.db_name, args.tbl_name, args.index_name, args.deleteData); + iface_->get_partitions_by_filter(result.success, args.db_name, args.tbl_name, args.filter, args.max_parts); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_index_by_name"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_by_filter"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("drop_index_by_name", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partitions_by_filter", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); 
oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -58659,44 +59210,44 @@ void ThriftHiveMetastoreProcessor::process_drop_index_by_name(int32_t seqid, ::a } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_index_by_name"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions_by_filter"); } - oprot->writeMessageBegin("drop_index_by_name", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partitions_by_filter", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_index_by_name", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_by_filter", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_index_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_part_specs_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_index_by_name", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_part_specs_by_filter", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_index_by_name"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_part_specs_by_filter"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_index_by_name"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_part_specs_by_filter"); } - ThriftHiveMetastore_get_index_by_name_args args; + ThriftHiveMetastore_get_part_specs_by_filter_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_index_by_name", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_part_specs_by_filter", bytes); } - ThriftHiveMetastore_get_index_by_name_result result; + ThriftHiveMetastore_get_part_specs_by_filter_result result; try { - iface_->get_index_by_name(result.success, args.db_name, args.tbl_name, args.index_name); + iface_->get_part_specs_by_filter(result.success, args.db_name, args.tbl_name, args.filter, args.max_parts); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; @@ -58706,11 +59257,11 @@ void ThriftHiveMetastoreProcessor::process_get_index_by_name(int32_t seqid, ::ap result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_index_by_name"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_part_specs_by_filter"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_index_by_name", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_part_specs_by_filter", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); 
oprot->getTransport()->writeEnd(); @@ -58719,58 +59270,58 @@ void ThriftHiveMetastoreProcessor::process_get_index_by_name(int32_t seqid, ::ap } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_index_by_name"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_part_specs_by_filter"); } - oprot->writeMessageBegin("get_index_by_name", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_part_specs_by_filter", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_index_by_name", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_part_specs_by_filter", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_indexes(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partitions_by_expr(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_indexes", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_by_expr", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_indexes"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_by_expr"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_indexes"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_by_expr"); } - ThriftHiveMetastore_get_indexes_args args; + ThriftHiveMetastore_get_partitions_by_expr_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_indexes", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_by_expr", bytes); } - ThriftHiveMetastore_get_indexes_result result; + ThriftHiveMetastore_get_partitions_by_expr_result result; try { - iface_->get_indexes(result.success, args.db_name, args.tbl_name, args.max_indexes); + iface_->get_partitions_by_expr(result.success, args.req); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_indexes"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_by_expr"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_indexes", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partitions_by_expr", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -58779,55 +59330,58 @@ void ThriftHiveMetastoreProcessor::process_get_indexes(int32_t seqid, 
::apache:: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_indexes"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions_by_expr"); } - oprot->writeMessageBegin("get_indexes", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partitions_by_expr", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_indexes", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_by_expr", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_index_names(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_num_partitions_by_filter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_index_names", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_num_partitions_by_filter", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_index_names"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_num_partitions_by_filter"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_index_names"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_num_partitions_by_filter"); } - ThriftHiveMetastore_get_index_names_args args; + ThriftHiveMetastore_get_num_partitions_by_filter_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_index_names", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_num_partitions_by_filter", bytes); } - ThriftHiveMetastore_get_index_names_result result; + ThriftHiveMetastore_get_num_partitions_by_filter_result result; try { - iface_->get_index_names(result.success, args.db_name, args.tbl_name, args.max_indexes); + result.success = iface_->get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter); result.__isset.success = true; - } catch (MetaException &o2) { + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_index_names"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_num_partitions_by_filter"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_index_names", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_num_partitions_by_filter", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -58836,44 +59390,44 @@ void ThriftHiveMetastoreProcessor::process_get_index_names(int32_t seqid, ::apac } if (this->eventHandler_.get() != NULL) { - 
this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_index_names"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_num_partitions_by_filter"); } - oprot->writeMessageBegin("get_index_names", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_num_partitions_by_filter", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_index_names", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_num_partitions_by_filter", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_primary_keys(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partitions_by_names(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_primary_keys", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_by_names", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_primary_keys"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_by_names"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_primary_keys"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_by_names"); } - ThriftHiveMetastore_get_primary_keys_args args; + ThriftHiveMetastore_get_partitions_by_names_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_primary_keys", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_by_names", bytes); } - ThriftHiveMetastore_get_primary_keys_result result; + ThriftHiveMetastore_get_partitions_by_names_result result; try { - iface_->get_primary_keys(result.success, args.request); + iface_->get_partitions_by_names(result.success, args.db_name, args.tbl_name, args.names); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; @@ -58883,11 +59437,11 @@ void ThriftHiveMetastoreProcessor::process_get_primary_keys(int32_t seqid, ::apa result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_primary_keys"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_by_names"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partitions_by_names", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -58896,58 +59450,57 @@ void ThriftHiveMetastoreProcessor::process_get_primary_keys(int32_t seqid, ::apa } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_primary_keys"); + this->eventHandler_->preWrite(ctx, 
"ThriftHiveMetastore.get_partitions_by_names"); } - oprot->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partitions_by_names", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_primary_keys", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_by_names", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_foreign_keys(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_alter_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_foreign_keys", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_partition", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_foreign_keys"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_partition"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_foreign_keys"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_partition"); } - ThriftHiveMetastore_get_foreign_keys_args args; + ThriftHiveMetastore_alter_partition_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_foreign_keys", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_partition", bytes); } - ThriftHiveMetastore_get_foreign_keys_result result; + ThriftHiveMetastore_alter_partition_result result; try { - iface_->get_foreign_keys(result.success, args.request); - result.__isset.success = true; - } catch (MetaException &o1) { + iface_->alter_partition(args.db_name, args.tbl_name, args.new_part); + } catch (InvalidOperationException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_foreign_keys"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_partition"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("alter_partition", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -58956,58 +59509,57 @@ void ThriftHiveMetastoreProcessor::process_get_foreign_keys(int32_t seqid, ::apa } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_foreign_keys"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_partition"); } - oprot->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_REPLY, seqid); + 
oprot->writeMessageBegin("alter_partition", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_foreign_keys", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_partition", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_unique_constraints(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_alter_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_unique_constraints", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_partitions", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_unique_constraints"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_partitions"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_unique_constraints"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_partitions"); } - ThriftHiveMetastore_get_unique_constraints_args args; + ThriftHiveMetastore_alter_partitions_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_unique_constraints", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_partitions", bytes); } - ThriftHiveMetastore_get_unique_constraints_result result; + ThriftHiveMetastore_alter_partitions_result result; try { - iface_->get_unique_constraints(result.success, args.request); - result.__isset.success = true; - } catch (MetaException &o1) { + iface_->alter_partitions(args.db_name, args.tbl_name, args.new_parts); + } catch (InvalidOperationException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_unique_constraints"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_partitions"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_unique_constraints", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("alter_partitions", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59016,58 +59568,57 @@ void ThriftHiveMetastoreProcessor::process_get_unique_constraints(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_unique_constraints"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_partitions"); } - oprot->writeMessageBegin("get_unique_constraints", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("alter_partitions", ::apache::thrift::protocol::T_REPLY, seqid); 
result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_unique_constraints", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_partitions", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_not_null_constraints(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_alter_partitions_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_not_null_constraints", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_partitions_with_environment_context", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_not_null_constraints"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_not_null_constraints"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context"); } - ThriftHiveMetastore_get_not_null_constraints_args args; + ThriftHiveMetastore_alter_partitions_with_environment_context_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_not_null_constraints", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context", bytes); } - ThriftHiveMetastore_get_not_null_constraints_result result; + ThriftHiveMetastore_alter_partitions_with_environment_context_result result; try { - iface_->get_not_null_constraints(result.success, args.request); - result.__isset.success = true; - } catch (MetaException &o1) { + iface_->alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context); + } catch (InvalidOperationException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_not_null_constraints"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_not_null_constraints", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59076,64 +59627,57 @@ void ThriftHiveMetastoreProcessor::process_get_not_null_constraints(int32_t seqi } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_not_null_constraints"); + this->eventHandler_->preWrite(ctx, 
"ThriftHiveMetastore.alter_partitions_with_environment_context"); } - oprot->writeMessageBegin("get_not_null_constraints", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_not_null_constraints", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context", bytes); } } -void ThriftHiveMetastoreProcessor::process_update_table_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_alter_partition_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.update_table_column_statistics", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_partition_with_environment_context", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.update_table_column_statistics"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_partition_with_environment_context"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.update_table_column_statistics"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_partition_with_environment_context"); } - ThriftHiveMetastore_update_table_column_statistics_args args; + ThriftHiveMetastore_alter_partition_with_environment_context_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.update_table_column_statistics", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_partition_with_environment_context", bytes); } - ThriftHiveMetastore_update_table_column_statistics_result result; + ThriftHiveMetastore_alter_partition_with_environment_context_result result; try { - result.success = iface_->update_table_column_statistics(args.stats_obj); - result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + iface_->alter_partition_with_environment_context(args.db_name, args.tbl_name, args.new_part, args.environment_context); + } catch (InvalidOperationException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (InvalidObjectException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (InvalidInputException &o4) { - result.o4 = o4; - result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.update_table_column_statistics"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_partition_with_environment_context"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("update_table_column_statistics", 
::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("alter_partition_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59142,64 +59686,57 @@ void ThriftHiveMetastoreProcessor::process_update_table_column_statistics(int32_ } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.update_table_column_statistics"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_partition_with_environment_context"); } - oprot->writeMessageBegin("update_table_column_statistics", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("alter_partition_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.update_table_column_statistics", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_partition_with_environment_context", bytes); } } -void ThriftHiveMetastoreProcessor::process_update_partition_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_rename_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.update_partition_column_statistics", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.rename_partition", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.update_partition_column_statistics"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.rename_partition"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.update_partition_column_statistics"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.rename_partition"); } - ThriftHiveMetastore_update_partition_column_statistics_args args; + ThriftHiveMetastore_rename_partition_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.update_partition_column_statistics", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.rename_partition", bytes); } - ThriftHiveMetastore_update_partition_column_statistics_result result; + ThriftHiveMetastore_rename_partition_result result; try { - result.success = iface_->update_partition_column_statistics(args.stats_obj); - result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + iface_->rename_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part); + } catch (InvalidOperationException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (InvalidObjectException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (InvalidInputException &o4) { - result.o4 = o4; - result.__isset.o4 = true; } catch (const std::exception& e) { if 
(this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.update_partition_column_statistics"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.rename_partition"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("update_partition_column_statistics", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("rename_partition", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59208,64 +59745,55 @@ void ThriftHiveMetastoreProcessor::process_update_partition_column_statistics(in } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.update_partition_column_statistics"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.rename_partition"); } - oprot->writeMessageBegin("update_partition_column_statistics", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("rename_partition", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.update_partition_column_statistics", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.rename_partition", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_table_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_partition_name_has_valid_characters(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table_column_statistics", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.partition_name_has_valid_characters", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table_column_statistics"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.partition_name_has_valid_characters"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table_column_statistics"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.partition_name_has_valid_characters"); } - ThriftHiveMetastore_get_table_column_statistics_args args; + ThriftHiveMetastore_partition_name_has_valid_characters_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table_column_statistics", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.partition_name_has_valid_characters", bytes); } - ThriftHiveMetastore_get_table_column_statistics_result result; + ThriftHiveMetastore_partition_name_has_valid_characters_result result; try { - iface_->get_table_column_statistics(result.success, args.db_name, args.tbl_name, args.col_name); + result.success = iface_->partition_name_has_valid_characters(args.part_vals, args.throw_exception); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (MetaException &o1) { 
result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (InvalidInputException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (InvalidObjectException &o4) { - result.o4 = o4; - result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table_column_statistics"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.partition_name_has_valid_characters"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_table_column_statistics", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("partition_name_has_valid_characters", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59274,64 +59802,55 @@ void ThriftHiveMetastoreProcessor::process_get_table_column_statistics(int32_t s } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table_column_statistics"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.partition_name_has_valid_characters"); } - oprot->writeMessageBegin("get_table_column_statistics", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("partition_name_has_valid_characters", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_table_column_statistics", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.partition_name_has_valid_characters", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partition_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_config_value(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partition_column_statistics", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_config_value", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partition_column_statistics"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_config_value"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partition_column_statistics"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_config_value"); } - ThriftHiveMetastore_get_partition_column_statistics_args args; + ThriftHiveMetastore_get_config_value_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partition_column_statistics", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_config_value", bytes); } - ThriftHiveMetastore_get_partition_column_statistics_result result; + ThriftHiveMetastore_get_config_value_result result; try { - 
iface_->get_partition_column_statistics(result.success, args.db_name, args.tbl_name, args.part_name, args.col_name); + iface_->get_config_value(result.success, args.name, args.defaultValue); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (ConfigValSecurityException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (InvalidInputException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (InvalidObjectException &o4) { - result.o4 = o4; - result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partition_column_statistics"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_config_value"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_partition_column_statistics", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_config_value", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59340,58 +59859,55 @@ void ThriftHiveMetastoreProcessor::process_get_partition_column_statistics(int32 } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partition_column_statistics"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_config_value"); } - oprot->writeMessageBegin("get_partition_column_statistics", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_config_value", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partition_column_statistics", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_config_value", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_table_statistics_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_partition_name_to_vals(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table_statistics_req", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.partition_name_to_vals", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table_statistics_req"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.partition_name_to_vals"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table_statistics_req"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.partition_name_to_vals"); } - ThriftHiveMetastore_get_table_statistics_req_args args; + ThriftHiveMetastore_partition_name_to_vals_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table_statistics_req", bytes); + 
this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.partition_name_to_vals", bytes); } - ThriftHiveMetastore_get_table_statistics_req_result result; + ThriftHiveMetastore_partition_name_to_vals_result result; try { - iface_->get_table_statistics_req(result.success, args.request); + iface_->partition_name_to_vals(result.success, args.part_name); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { - result.o2 = o2; - result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table_statistics_req"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.partition_name_to_vals"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_table_statistics_req", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("partition_name_to_vals", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59400,58 +59916,55 @@ void ThriftHiveMetastoreProcessor::process_get_table_statistics_req(int32_t seqi } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table_statistics_req"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.partition_name_to_vals"); } - oprot->writeMessageBegin("get_table_statistics_req", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("partition_name_to_vals", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_table_statistics_req", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.partition_name_to_vals", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_partitions_statistics_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_partition_name_to_spec(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_statistics_req", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.partition_name_to_spec", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_statistics_req"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.partition_name_to_spec"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_statistics_req"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.partition_name_to_spec"); } - ThriftHiveMetastore_get_partitions_statistics_req_args args; + ThriftHiveMetastore_partition_name_to_spec_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_statistics_req", bytes); + 
this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.partition_name_to_spec", bytes); } - ThriftHiveMetastore_get_partitions_statistics_req_result result; + ThriftHiveMetastore_partition_name_to_spec_result result; try { - iface_->get_partitions_statistics_req(result.success, args.request); + iface_->partition_name_to_spec(result.success, args.part_name); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { - result.o2 = o2; - result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_statistics_req"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.partition_name_to_spec"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_partitions_statistics_req", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("partition_name_to_spec", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59460,58 +59973,69 @@ void ThriftHiveMetastoreProcessor::process_get_partitions_statistics_req(int32_t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions_statistics_req"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.partition_name_to_spec"); } - oprot->writeMessageBegin("get_partitions_statistics_req", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("partition_name_to_spec", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_statistics_req", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.partition_name_to_spec", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_aggr_stats_for(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_markPartitionForEvent(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_aggr_stats_for", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.markPartitionForEvent", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_aggr_stats_for"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.markPartitionForEvent"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_aggr_stats_for"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.markPartitionForEvent"); } - ThriftHiveMetastore_get_aggr_stats_for_args args; + ThriftHiveMetastore_markPartitionForEvent_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_aggr_stats_for", bytes); + this->eventHandler_->postRead(ctx, 
"ThriftHiveMetastore.markPartitionForEvent", bytes); } - ThriftHiveMetastore_get_aggr_stats_for_result result; + ThriftHiveMetastore_markPartitionForEvent_result result; try { - iface_->get_aggr_stats_for(result.success, args.request); - result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + iface_->markPartitionForEvent(args.db_name, args.tbl_name, args.part_vals, args.eventType); + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; + } catch (UnknownDBException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (UnknownTableException &o4) { + result.o4 = o4; + result.__isset.o4 = true; + } catch (UnknownPartitionException &o5) { + result.o5 = o5; + result.__isset.o5 = true; + } catch (InvalidPartitionException &o6) { + result.o6 = o6; + result.__isset.o6 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_aggr_stats_for"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.markPartitionForEvent"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_aggr_stats_for", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("markPartitionForEvent", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59520,64 +60044,70 @@ void ThriftHiveMetastoreProcessor::process_get_aggr_stats_for(int32_t seqid, ::a } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_aggr_stats_for"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.markPartitionForEvent"); } - oprot->writeMessageBegin("get_aggr_stats_for", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("markPartitionForEvent", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_aggr_stats_for", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.markPartitionForEvent", bytes); } } -void ThriftHiveMetastoreProcessor::process_set_aggr_stats_for(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_isPartitionMarkedForEvent(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.set_aggr_stats_for", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.isPartitionMarkedForEvent", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.set_aggr_stats_for"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.isPartitionMarkedForEvent"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.set_aggr_stats_for"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.isPartitionMarkedForEvent"); } - ThriftHiveMetastore_set_aggr_stats_for_args args; + 
ThriftHiveMetastore_isPartitionMarkedForEvent_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.set_aggr_stats_for", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.isPartitionMarkedForEvent", bytes); } - ThriftHiveMetastore_set_aggr_stats_for_result result; + ThriftHiveMetastore_isPartitionMarkedForEvent_result result; try { - result.success = iface_->set_aggr_stats_for(args.request); + result.success = iface_->isPartitionMarkedForEvent(args.db_name, args.tbl_name, args.part_vals, args.eventType); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (InvalidObjectException &o2) { + } catch (NoSuchObjectException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (MetaException &o3) { + } catch (UnknownDBException &o3) { result.o3 = o3; result.__isset.o3 = true; - } catch (InvalidInputException &o4) { + } catch (UnknownTableException &o4) { result.o4 = o4; result.__isset.o4 = true; + } catch (UnknownPartitionException &o5) { + result.o5 = o5; + result.__isset.o5 = true; + } catch (InvalidPartitionException &o6) { + result.o6 = o6; + result.__isset.o6 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.set_aggr_stats_for"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.isPartitionMarkedForEvent"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("set_aggr_stats_for", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("isPartitionMarkedForEvent", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59586,64 +60116,61 @@ void ThriftHiveMetastoreProcessor::process_set_aggr_stats_for(int32_t seqid, ::a } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.set_aggr_stats_for"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.isPartitionMarkedForEvent"); } - oprot->writeMessageBegin("set_aggr_stats_for", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("isPartitionMarkedForEvent", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.set_aggr_stats_for", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.isPartitionMarkedForEvent", bytes); } } -void ThriftHiveMetastoreProcessor::process_delete_partition_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_add_index(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.delete_partition_column_statistics", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_index", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, 
"ThriftHiveMetastore.delete_partition_column_statistics"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_index"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.delete_partition_column_statistics"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_index"); } - ThriftHiveMetastore_delete_partition_column_statistics_args args; + ThriftHiveMetastore_add_index_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.delete_partition_column_statistics", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_index", bytes); } - ThriftHiveMetastore_delete_partition_column_statistics_result result; + ThriftHiveMetastore_add_index_result result; try { - result.success = iface_->delete_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name); + iface_->add_index(result.success, args.new_index, args.index_table); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (InvalidObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { + } catch (AlreadyExistsException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (InvalidObjectException &o3) { + } catch (MetaException &o3) { result.o3 = o3; result.__isset.o3 = true; - } catch (InvalidInputException &o4) { - result.o4 = o4; - result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.delete_partition_column_statistics"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_index"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("delete_partition_column_statistics", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("add_index", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59652,64 +60179,57 @@ void ThriftHiveMetastoreProcessor::process_delete_partition_column_statistics(in } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.delete_partition_column_statistics"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_index"); } - oprot->writeMessageBegin("delete_partition_column_statistics", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("add_index", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.delete_partition_column_statistics", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_index", bytes); } } -void ThriftHiveMetastoreProcessor::process_delete_table_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_alter_index(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = 
this->eventHandler_->getContext("ThriftHiveMetastore.delete_table_column_statistics", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_index", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.delete_table_column_statistics"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_index"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.delete_table_column_statistics"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_index"); } - ThriftHiveMetastore_delete_table_column_statistics_args args; + ThriftHiveMetastore_alter_index_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.delete_table_column_statistics", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_index", bytes); } - ThriftHiveMetastore_delete_table_column_statistics_result result; + ThriftHiveMetastore_alter_index_result result; try { - result.success = iface_->delete_table_column_statistics(args.db_name, args.tbl_name, args.col_name); - result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + iface_->alter_index(args.dbname, args.base_tbl_name, args.idx_name, args.new_idx); + } catch (InvalidOperationException &o1) { result.o1 = o1; result.__isset.o1 = true; } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (InvalidObjectException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (InvalidInputException &o4) { - result.o4 = o4; - result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.delete_table_column_statistics"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_index"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("delete_table_column_statistics", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("alter_index", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59718,63 +60238,58 @@ void ThriftHiveMetastoreProcessor::process_delete_table_column_statistics(int32_ } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.delete_table_column_statistics"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_index"); } - oprot->writeMessageBegin("delete_table_column_statistics", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("alter_index", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.delete_table_column_statistics", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_index", bytes); } } -void ThriftHiveMetastoreProcessor::process_create_function(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_drop_index_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, 
::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_function", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_index_by_name", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_function"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_index_by_name"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_function"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_index_by_name"); } - ThriftHiveMetastore_create_function_args args; + ThriftHiveMetastore_drop_index_by_name_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_function", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_index_by_name", bytes); } - ThriftHiveMetastore_create_function_result result; + ThriftHiveMetastore_drop_index_by_name_result result; try { - iface_->create_function(args.func); - } catch (AlreadyExistsException &o1) { + result.success = iface_->drop_index_by_name(args.db_name, args.tbl_name, args.index_name, args.deleteData); + result.__isset.success = true; + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (InvalidObjectException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (NoSuchObjectException &o4) { - result.o4 = o4; - result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_function"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_index_by_name"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("create_function", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("drop_index_by_name", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59783,57 +60298,58 @@ void ThriftHiveMetastoreProcessor::process_create_function(int32_t seqid, ::apac } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_function"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_index_by_name"); } - oprot->writeMessageBegin("create_function", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("drop_index_by_name", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_function", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_index_by_name", bytes); } } -void ThriftHiveMetastoreProcessor::process_drop_function(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_index_by_name(int32_t seqid, 
::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_function", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_index_by_name", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_function"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_index_by_name"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_function"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_index_by_name"); } - ThriftHiveMetastore_drop_function_args args; + ThriftHiveMetastore_get_index_by_name_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_function", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_index_by_name", bytes); } - ThriftHiveMetastore_drop_function_result result; + ThriftHiveMetastore_get_index_by_name_result result; try { - iface_->drop_function(args.dbName, args.funcName); - } catch (NoSuchObjectException &o1) { + iface_->get_index_by_name(result.success, args.db_name, args.tbl_name, args.index_name); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_function"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_index_by_name"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("drop_function", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_index_by_name", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59842,45 +60358,46 @@ void ThriftHiveMetastoreProcessor::process_drop_function(int32_t seqid, ::apache } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_function"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_index_by_name"); } - oprot->writeMessageBegin("drop_function", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_index_by_name", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_function", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_index_by_name", bytes); } } -void ThriftHiveMetastoreProcessor::process_alter_function(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_indexes(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if 
(this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_function", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_indexes", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_function"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_indexes"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_function"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_indexes"); } - ThriftHiveMetastore_alter_function_args args; + ThriftHiveMetastore_get_indexes_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_function", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_indexes", bytes); } - ThriftHiveMetastore_alter_function_result result; + ThriftHiveMetastore_get_indexes_result result; try { - iface_->alter_function(args.dbName, args.funcName, args.newFunc); - } catch (InvalidOperationException &o1) { + iface_->get_indexes(result.success, args.db_name, args.tbl_name, args.max_indexes); + result.__isset.success = true; + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; } catch (MetaException &o2) { @@ -59888,11 +60405,11 @@ void ThriftHiveMetastoreProcessor::process_alter_function(int32_t seqid, ::apach result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_function"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_indexes"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("alter_function", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_indexes", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59901,55 +60418,55 @@ void ThriftHiveMetastoreProcessor::process_alter_function(int32_t seqid, ::apach } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_function"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_indexes"); } - oprot->writeMessageBegin("alter_function", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_indexes", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_function", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_indexes", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_functions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_index_names(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_functions", callContext); + ctx = 
this->eventHandler_->getContext("ThriftHiveMetastore.get_index_names", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_functions"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_index_names"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_functions"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_index_names"); } - ThriftHiveMetastore_get_functions_args args; + ThriftHiveMetastore_get_index_names_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_functions", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_index_names", bytes); } - ThriftHiveMetastore_get_functions_result result; + ThriftHiveMetastore_get_index_names_result result; try { - iface_->get_functions(result.success, args.dbName, args.pattern); + iface_->get_index_names(result.success, args.db_name, args.tbl_name, args.max_indexes); result.__isset.success = true; - } catch (MetaException &o1) { - result.o1 = o1; - result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_functions"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_index_names"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_functions", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_index_names", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59958,44 +60475,44 @@ void ThriftHiveMetastoreProcessor::process_get_functions(int32_t seqid, ::apache } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_functions"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_index_names"); } - oprot->writeMessageBegin("get_functions", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_index_names", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_functions", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_index_names", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_function(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_primary_keys(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_function", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_primary_keys", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_function"); + ::apache::thrift::TProcessorContextFreer 
freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_primary_keys"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_function"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_primary_keys"); } - ThriftHiveMetastore_get_function_args args; + ThriftHiveMetastore_get_primary_keys_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_function", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_primary_keys", bytes); } - ThriftHiveMetastore_get_function_result result; + ThriftHiveMetastore_get_primary_keys_result result; try { - iface_->get_function(result.success, args.dbName, args.funcName); + iface_->get_primary_keys(result.success, args.request); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; @@ -60005,11 +60522,11 @@ void ThriftHiveMetastoreProcessor::process_get_function(int32_t seqid, ::apache: result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_function"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_primary_keys"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_function", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60018,55 +60535,58 @@ void ThriftHiveMetastoreProcessor::process_get_function(int32_t seqid, ::apache: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_function"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_primary_keys"); } - oprot->writeMessageBegin("get_function", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_function", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_primary_keys", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_all_functions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_foreign_keys(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_all_functions", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_foreign_keys", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_all_functions"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_foreign_keys"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_all_functions"); + this->eventHandler_->preRead(ctx, 
"ThriftHiveMetastore.get_foreign_keys"); } - ThriftHiveMetastore_get_all_functions_args args; + ThriftHiveMetastore_get_foreign_keys_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_all_functions", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_foreign_keys", bytes); } - ThriftHiveMetastore_get_all_functions_result result; + ThriftHiveMetastore_get_foreign_keys_result result; try { - iface_->get_all_functions(result.success); + iface_->get_foreign_keys(result.success, args.request); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_all_functions"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_foreign_keys"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_all_functions", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60075,55 +60595,58 @@ void ThriftHiveMetastoreProcessor::process_get_all_functions(int32_t seqid, ::ap } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_all_functions"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_foreign_keys"); } - oprot->writeMessageBegin("get_all_functions", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_all_functions", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_foreign_keys", bytes); } } -void ThriftHiveMetastoreProcessor::process_create_role(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_unique_constraints(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_role", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_unique_constraints", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_role"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_unique_constraints"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_role"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_unique_constraints"); } - ThriftHiveMetastore_create_role_args args; + ThriftHiveMetastore_get_unique_constraints_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() 
!= NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_role", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_unique_constraints", bytes); } - ThriftHiveMetastore_create_role_result result; + ThriftHiveMetastore_get_unique_constraints_result result; try { - result.success = iface_->create_role(args.role); + iface_->get_unique_constraints(result.success, args.request); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_role"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_unique_constraints"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("create_role", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_unique_constraints", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60132,55 +60655,58 @@ void ThriftHiveMetastoreProcessor::process_create_role(int32_t seqid, ::apache:: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_role"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_unique_constraints"); } - oprot->writeMessageBegin("create_role", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_unique_constraints", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_role", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_unique_constraints", bytes); } } -void ThriftHiveMetastoreProcessor::process_drop_role(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_not_null_constraints(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_role", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_not_null_constraints", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_role"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_not_null_constraints"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_role"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_not_null_constraints"); } - ThriftHiveMetastore_drop_role_args args; + ThriftHiveMetastore_get_not_null_constraints_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_role", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_not_null_constraints", bytes); } - ThriftHiveMetastore_drop_role_result result; + 
ThriftHiveMetastore_get_not_null_constraints_result result; try { - result.success = iface_->drop_role(args.role_name); + iface_->get_not_null_constraints(result.success, args.request); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_role"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_not_null_constraints"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("drop_role", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_not_null_constraints", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60189,55 +60715,64 @@ void ThriftHiveMetastoreProcessor::process_drop_role(int32_t seqid, ::apache::th } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_role"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_not_null_constraints"); } - oprot->writeMessageBegin("drop_role", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_not_null_constraints", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_role", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_not_null_constraints", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_role_names(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_update_table_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_role_names", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.update_table_column_statistics", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_role_names"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.update_table_column_statistics"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_role_names"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.update_table_column_statistics"); } - ThriftHiveMetastore_get_role_names_args args; + ThriftHiveMetastore_update_table_column_statistics_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_role_names", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.update_table_column_statistics", bytes); } - ThriftHiveMetastore_get_role_names_result result; + ThriftHiveMetastore_update_table_column_statistics_result result; try { - iface_->get_role_names(result.success); + result.success = 
iface_->update_table_column_statistics(args.stats_obj); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (InvalidObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (InvalidInputException &o4) { + result.o4 = o4; + result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_role_names"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.update_table_column_statistics"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_role_names", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("update_table_column_statistics", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60246,55 +60781,64 @@ void ThriftHiveMetastoreProcessor::process_get_role_names(int32_t seqid, ::apach } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_role_names"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.update_table_column_statistics"); } - oprot->writeMessageBegin("get_role_names", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("update_table_column_statistics", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_role_names", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.update_table_column_statistics", bytes); } } -void ThriftHiveMetastoreProcessor::process_grant_role(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_update_partition_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.grant_role", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.update_partition_column_statistics", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.grant_role"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.update_partition_column_statistics"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.grant_role"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.update_partition_column_statistics"); } - ThriftHiveMetastore_grant_role_args args; + ThriftHiveMetastore_update_partition_column_statistics_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.grant_role", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.update_partition_column_statistics", bytes); } - ThriftHiveMetastore_grant_role_result result; + 
ThriftHiveMetastore_update_partition_column_statistics_result result; try { - result.success = iface_->grant_role(args.role_name, args.principal_name, args.principal_type, args.grantor, args.grantorType, args.grant_option); + result.success = iface_->update_partition_column_statistics(args.stats_obj); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (InvalidObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (InvalidInputException &o4) { + result.o4 = o4; + result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.grant_role"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.update_partition_column_statistics"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("grant_role", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("update_partition_column_statistics", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60303,55 +60847,64 @@ void ThriftHiveMetastoreProcessor::process_grant_role(int32_t seqid, ::apache::t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.grant_role"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.update_partition_column_statistics"); } - oprot->writeMessageBegin("grant_role", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("update_partition_column_statistics", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.grant_role", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.update_partition_column_statistics", bytes); } } -void ThriftHiveMetastoreProcessor::process_revoke_role(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_table_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.revoke_role", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table_column_statistics", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.revoke_role"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table_column_statistics"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.revoke_role"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table_column_statistics"); } - ThriftHiveMetastore_revoke_role_args args; + ThriftHiveMetastore_get_table_column_statistics_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, 
"ThriftHiveMetastore.revoke_role", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table_column_statistics", bytes); } - ThriftHiveMetastore_revoke_role_result result; + ThriftHiveMetastore_get_table_column_statistics_result result; try { - result.success = iface_->revoke_role(args.role_name, args.principal_name, args.principal_type); + iface_->get_table_column_statistics(result.success, args.db_name, args.tbl_name, args.col_name); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (InvalidInputException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (InvalidObjectException &o4) { + result.o4 = o4; + result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.revoke_role"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table_column_statistics"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("revoke_role", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_table_column_statistics", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60360,55 +60913,64 @@ void ThriftHiveMetastoreProcessor::process_revoke_role(int32_t seqid, ::apache:: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.revoke_role"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table_column_statistics"); } - oprot->writeMessageBegin("revoke_role", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_table_column_statistics", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.revoke_role", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_table_column_statistics", bytes); } } -void ThriftHiveMetastoreProcessor::process_list_roles(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partition_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.list_roles", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partition_column_statistics", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.list_roles"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partition_column_statistics"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.list_roles"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partition_column_statistics"); } - ThriftHiveMetastore_list_roles_args args; + ThriftHiveMetastore_get_partition_column_statistics_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = 
iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.list_roles", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partition_column_statistics", bytes); } - ThriftHiveMetastore_list_roles_result result; + ThriftHiveMetastore_get_partition_column_statistics_result result; try { - iface_->list_roles(result.success, args.principal_name, args.principal_type); + iface_->get_partition_column_statistics(result.success, args.db_name, args.tbl_name, args.part_name, args.col_name); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (InvalidInputException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (InvalidObjectException &o4) { + result.o4 = o4; + result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.list_roles"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partition_column_statistics"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("list_roles", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partition_column_statistics", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60417,55 +60979,58 @@ void ThriftHiveMetastoreProcessor::process_list_roles(int32_t seqid, ::apache::t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.list_roles"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partition_column_statistics"); } - oprot->writeMessageBegin("list_roles", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partition_column_statistics", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.list_roles", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partition_column_statistics", bytes); } } -void ThriftHiveMetastoreProcessor::process_grant_revoke_role(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_table_statistics_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.grant_revoke_role", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_table_statistics_req", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.grant_revoke_role"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_table_statistics_req"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.grant_revoke_role"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_table_statistics_req"); } - 
ThriftHiveMetastore_grant_revoke_role_args args; + ThriftHiveMetastore_get_table_statistics_req_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.grant_revoke_role", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_table_statistics_req", bytes); } - ThriftHiveMetastore_grant_revoke_role_result result; + ThriftHiveMetastore_get_table_statistics_req_result result; try { - iface_->grant_revoke_role(result.success, args.request); + iface_->get_table_statistics_req(result.success, args.request); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.grant_revoke_role"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_table_statistics_req"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("grant_revoke_role", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_table_statistics_req", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60474,55 +61039,58 @@ void ThriftHiveMetastoreProcessor::process_grant_revoke_role(int32_t seqid, ::ap } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.grant_revoke_role"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_table_statistics_req"); } - oprot->writeMessageBegin("grant_revoke_role", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_table_statistics_req", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.grant_revoke_role", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_table_statistics_req", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_principals_in_role(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_partitions_statistics_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_principals_in_role", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_partitions_statistics_req", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_principals_in_role"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_partitions_statistics_req"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_principals_in_role"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_partitions_statistics_req"); } - ThriftHiveMetastore_get_principals_in_role_args args; + 
ThriftHiveMetastore_get_partitions_statistics_req_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_principals_in_role", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_partitions_statistics_req", bytes); } - ThriftHiveMetastore_get_principals_in_role_result result; + ThriftHiveMetastore_get_partitions_statistics_req_result result; try { - iface_->get_principals_in_role(result.success, args.request); + iface_->get_partitions_statistics_req(result.success, args.request); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_principals_in_role"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_partitions_statistics_req"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_principals_in_role", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_partitions_statistics_req", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60531,55 +61099,58 @@ void ThriftHiveMetastoreProcessor::process_get_principals_in_role(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_principals_in_role"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_partitions_statistics_req"); } - oprot->writeMessageBegin("get_principals_in_role", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_partitions_statistics_req", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_principals_in_role", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_partitions_statistics_req", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_role_grants_for_principal(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_aggr_stats_for(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_role_grants_for_principal", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_aggr_stats_for", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_role_grants_for_principal"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_aggr_stats_for"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_role_grants_for_principal"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_aggr_stats_for"); } - ThriftHiveMetastore_get_role_grants_for_principal_args 
args; + ThriftHiveMetastore_get_aggr_stats_for_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_role_grants_for_principal", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_aggr_stats_for", bytes); } - ThriftHiveMetastore_get_role_grants_for_principal_result result; + ThriftHiveMetastore_get_aggr_stats_for_result result; try { - iface_->get_role_grants_for_principal(result.success, args.request); + iface_->get_aggr_stats_for(result.success, args.request); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_role_grants_for_principal"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_aggr_stats_for"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_role_grants_for_principal", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_aggr_stats_for", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60588,55 +61159,64 @@ void ThriftHiveMetastoreProcessor::process_get_role_grants_for_principal(int32_t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_role_grants_for_principal"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_aggr_stats_for"); } - oprot->writeMessageBegin("get_role_grants_for_principal", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_aggr_stats_for", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_role_grants_for_principal", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_aggr_stats_for", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_privilege_set(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_set_aggr_stats_for(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_privilege_set", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.set_aggr_stats_for", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_privilege_set"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.set_aggr_stats_for"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_privilege_set"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.set_aggr_stats_for"); } - ThriftHiveMetastore_get_privilege_set_args args; + ThriftHiveMetastore_set_aggr_stats_for_args args; args.read(iprot); 
iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_privilege_set", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.set_aggr_stats_for", bytes); } - ThriftHiveMetastore_get_privilege_set_result result; + ThriftHiveMetastore_set_aggr_stats_for_result result; try { - iface_->get_privilege_set(result.success, args.hiveObject, args.user_name, args.group_names); + result.success = iface_->set_aggr_stats_for(args.request); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (InvalidObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (InvalidInputException &o4) { + result.o4 = o4; + result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_privilege_set"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.set_aggr_stats_for"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_privilege_set", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("set_aggr_stats_for", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60645,55 +61225,64 @@ void ThriftHiveMetastoreProcessor::process_get_privilege_set(int32_t seqid, ::ap } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_privilege_set"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.set_aggr_stats_for"); } - oprot->writeMessageBegin("get_privilege_set", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("set_aggr_stats_for", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_privilege_set", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.set_aggr_stats_for", bytes); } } -void ThriftHiveMetastoreProcessor::process_list_privileges(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_delete_partition_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.list_privileges", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.delete_partition_column_statistics", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.list_privileges"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.delete_partition_column_statistics"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.list_privileges"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.delete_partition_column_statistics"); } - ThriftHiveMetastore_list_privileges_args 
args; + ThriftHiveMetastore_delete_partition_column_statistics_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.list_privileges", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.delete_partition_column_statistics", bytes); } - ThriftHiveMetastore_list_privileges_result result; + ThriftHiveMetastore_delete_partition_column_statistics_result result; try { - iface_->list_privileges(result.success, args.principal_name, args.principal_type, args.hiveObject); + result.success = iface_->delete_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (InvalidObjectException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (InvalidInputException &o4) { + result.o4 = o4; + result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.list_privileges"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.delete_partition_column_statistics"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("list_privileges", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("delete_partition_column_statistics", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60702,55 +61291,64 @@ void ThriftHiveMetastoreProcessor::process_list_privileges(int32_t seqid, ::apac } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.list_privileges"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.delete_partition_column_statistics"); } - oprot->writeMessageBegin("list_privileges", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("delete_partition_column_statistics", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.list_privileges", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.delete_partition_column_statistics", bytes); } } -void ThriftHiveMetastoreProcessor::process_grant_privileges(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_delete_table_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.grant_privileges", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.delete_table_column_statistics", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.grant_privileges"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.delete_table_column_statistics"); if 
(this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.grant_privileges"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.delete_table_column_statistics"); } - ThriftHiveMetastore_grant_privileges_args args; + ThriftHiveMetastore_delete_table_column_statistics_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.grant_privileges", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.delete_table_column_statistics", bytes); } - ThriftHiveMetastore_grant_privileges_result result; + ThriftHiveMetastore_delete_table_column_statistics_result result; try { - result.success = iface_->grant_privileges(args.privileges); + result.success = iface_->delete_table_column_statistics(args.db_name, args.tbl_name, args.col_name); result.__isset.success = true; - } catch (MetaException &o1) { + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (InvalidObjectException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (InvalidInputException &o4) { + result.o4 = o4; + result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.grant_privileges"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.delete_table_column_statistics"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("grant_privileges", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("delete_table_column_statistics", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60759,55 +61357,63 @@ void ThriftHiveMetastoreProcessor::process_grant_privileges(int32_t seqid, ::apa } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.grant_privileges"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.delete_table_column_statistics"); } - oprot->writeMessageBegin("grant_privileges", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("delete_table_column_statistics", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.grant_privileges", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.delete_table_column_statistics", bytes); } } -void ThriftHiveMetastoreProcessor::process_revoke_privileges(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_create_function(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.revoke_privileges", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_function", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.revoke_privileges"); + 
::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_function"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.revoke_privileges"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_function"); } - ThriftHiveMetastore_revoke_privileges_args args; + ThriftHiveMetastore_create_function_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.revoke_privileges", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_function", bytes); } - ThriftHiveMetastore_revoke_privileges_result result; + ThriftHiveMetastore_create_function_result result; try { - result.success = iface_->revoke_privileges(args.privileges); - result.__isset.success = true; - } catch (MetaException &o1) { + iface_->create_function(args.func); + } catch (AlreadyExistsException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (InvalidObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (NoSuchObjectException &o4) { + result.o4 = o4; + result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.revoke_privileges"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_function"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("revoke_privileges", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("create_function", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60816,55 +61422,57 @@ void ThriftHiveMetastoreProcessor::process_revoke_privileges(int32_t seqid, ::ap } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.revoke_privileges"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_function"); } - oprot->writeMessageBegin("revoke_privileges", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("create_function", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.revoke_privileges", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_function", bytes); } } -void ThriftHiveMetastoreProcessor::process_grant_revoke_privileges(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_drop_function(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.grant_revoke_privileges", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_function", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.grant_revoke_privileges"); + ::apache::thrift::TProcessorContextFreer 
freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_function"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.grant_revoke_privileges"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_function"); } - ThriftHiveMetastore_grant_revoke_privileges_args args; + ThriftHiveMetastore_drop_function_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.grant_revoke_privileges", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_function", bytes); } - ThriftHiveMetastore_grant_revoke_privileges_result result; + ThriftHiveMetastore_drop_function_result result; try { - iface_->grant_revoke_privileges(result.success, args.request); - result.__isset.success = true; - } catch (MetaException &o1) { + iface_->drop_function(args.dbName, args.funcName); + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.grant_revoke_privileges"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_function"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("grant_revoke_privileges", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("drop_function", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60873,55 +61481,57 @@ void ThriftHiveMetastoreProcessor::process_grant_revoke_privileges(int32_t seqid } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.grant_revoke_privileges"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_function"); } - oprot->writeMessageBegin("grant_revoke_privileges", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("drop_function", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.grant_revoke_privileges", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_function", bytes); } } -void ThriftHiveMetastoreProcessor::process_set_ugi(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_alter_function(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.set_ugi", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_function", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.set_ugi"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_function"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.set_ugi"); + this->eventHandler_->preRead(ctx, 
"ThriftHiveMetastore.alter_function"); } - ThriftHiveMetastore_set_ugi_args args; + ThriftHiveMetastore_alter_function_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.set_ugi", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_function", bytes); } - ThriftHiveMetastore_set_ugi_result result; + ThriftHiveMetastore_alter_function_result result; try { - iface_->set_ugi(result.success, args.user_name, args.group_names); - result.__isset.success = true; - } catch (MetaException &o1) { + iface_->alter_function(args.dbName, args.funcName, args.newFunc); + } catch (InvalidOperationException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.set_ugi"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_function"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("set_ugi", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("alter_function", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60930,55 +61540,55 @@ void ThriftHiveMetastoreProcessor::process_set_ugi(int32_t seqid, ::apache::thri } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.set_ugi"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_function"); } - oprot->writeMessageBegin("set_ugi", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("alter_function", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.set_ugi", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_function", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_delegation_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_functions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_delegation_token", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_functions", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_delegation_token"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_functions"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_delegation_token"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_functions"); } - ThriftHiveMetastore_get_delegation_token_args args; + ThriftHiveMetastore_get_functions_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - 
this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_delegation_token", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_functions", bytes); } - ThriftHiveMetastore_get_delegation_token_result result; + ThriftHiveMetastore_get_functions_result result; try { - iface_->get_delegation_token(result.success, args.token_owner, args.renewer_kerberos_principal_name); + iface_->get_functions(result.success, args.dbName, args.pattern); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_delegation_token"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_functions"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_delegation_token", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_functions", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -60987,55 +61597,58 @@ void ThriftHiveMetastoreProcessor::process_get_delegation_token(int32_t seqid, : } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_delegation_token"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_functions"); } - oprot->writeMessageBegin("get_delegation_token", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_functions", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_delegation_token", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_functions", bytes); } } -void ThriftHiveMetastoreProcessor::process_renew_delegation_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_function(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.renew_delegation_token", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_function", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.renew_delegation_token"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_function"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.renew_delegation_token"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_function"); } - ThriftHiveMetastore_renew_delegation_token_args args; + ThriftHiveMetastore_get_function_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.renew_delegation_token", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_function", bytes); } - ThriftHiveMetastore_renew_delegation_token_result result; + ThriftHiveMetastore_get_function_result 
result; try { - result.success = iface_->renew_delegation_token(args.token_str_form); + iface_->get_function(result.success, args.dbName, args.funcName); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.renew_delegation_token"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_function"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("renew_delegation_token", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_function", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61044,54 +61657,55 @@ void ThriftHiveMetastoreProcessor::process_renew_delegation_token(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.renew_delegation_token"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_function"); } - oprot->writeMessageBegin("renew_delegation_token", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_function", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.renew_delegation_token", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_function", bytes); } } -void ThriftHiveMetastoreProcessor::process_cancel_delegation_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_all_functions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.cancel_delegation_token", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_all_functions", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.cancel_delegation_token"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_all_functions"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.cancel_delegation_token"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_all_functions"); } - ThriftHiveMetastore_cancel_delegation_token_args args; + ThriftHiveMetastore_get_all_functions_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.cancel_delegation_token", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_all_functions", bytes); } - ThriftHiveMetastore_cancel_delegation_token_result result; + ThriftHiveMetastore_get_all_functions_result result; try { - iface_->cancel_delegation_token(args.token_str_form); + iface_->get_all_functions(result.success); + result.__isset.success = true; } catch (MetaException &o1) 
{ result.o1 = o1; result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.cancel_delegation_token"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_all_functions"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("cancel_delegation_token", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_all_functions", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61100,52 +61714,55 @@ void ThriftHiveMetastoreProcessor::process_cancel_delegation_token(int32_t seqid } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.cancel_delegation_token"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_all_functions"); } - oprot->writeMessageBegin("cancel_delegation_token", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_all_functions", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.cancel_delegation_token", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_all_functions", bytes); } } -void ThriftHiveMetastoreProcessor::process_add_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_create_role(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_token", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_role", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_token"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_role"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_token"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_role"); } - ThriftHiveMetastore_add_token_args args; + ThriftHiveMetastore_create_role_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_token", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_role", bytes); } - ThriftHiveMetastore_add_token_result result; + ThriftHiveMetastore_create_role_result result; try { - result.success = iface_->add_token(args.token_identifier, args.delegation_token); + result.success = iface_->create_role(args.role); result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_token"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_role"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("add_token", 
::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("create_role", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61154,52 +61771,55 @@ void ThriftHiveMetastoreProcessor::process_add_token(int32_t seqid, ::apache::th } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_token"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_role"); } - oprot->writeMessageBegin("add_token", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("create_role", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_token", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_role", bytes); } } -void ThriftHiveMetastoreProcessor::process_remove_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_drop_role(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.remove_token", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_role", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.remove_token"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_role"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.remove_token"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_role"); } - ThriftHiveMetastore_remove_token_args args; + ThriftHiveMetastore_drop_role_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.remove_token", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_role", bytes); } - ThriftHiveMetastore_remove_token_result result; + ThriftHiveMetastore_drop_role_result result; try { - result.success = iface_->remove_token(args.token_identifier); + result.success = iface_->drop_role(args.role_name); result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.remove_token"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_role"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("remove_token", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("drop_role", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61208,52 +61828,55 @@ void ThriftHiveMetastoreProcessor::process_remove_token(int32_t seqid, ::apache: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.remove_token"); + this->eventHandler_->preWrite(ctx, 
"ThriftHiveMetastore.drop_role"); } - oprot->writeMessageBegin("remove_token", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("drop_role", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.remove_token", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_role", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_role_names(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_token", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_role_names", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_token"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_role_names"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_token"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_role_names"); } - ThriftHiveMetastore_get_token_args args; + ThriftHiveMetastore_get_role_names_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_token", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_role_names", bytes); } - ThriftHiveMetastore_get_token_result result; + ThriftHiveMetastore_get_role_names_result result; try { - iface_->get_token(result.success, args.token_identifier); + iface_->get_role_names(result.success); result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_token"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_role_names"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_token", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_role_names", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61262,52 +61885,55 @@ void ThriftHiveMetastoreProcessor::process_get_token(int32_t seqid, ::apache::th } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_token"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_role_names"); } - oprot->writeMessageBegin("get_token", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_role_names", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_token", bytes); + 
this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_role_names", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_all_token_identifiers(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_grant_role(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_all_token_identifiers", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.grant_role", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_all_token_identifiers"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.grant_role"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_all_token_identifiers"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.grant_role"); } - ThriftHiveMetastore_get_all_token_identifiers_args args; + ThriftHiveMetastore_grant_role_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_all_token_identifiers", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.grant_role", bytes); } - ThriftHiveMetastore_get_all_token_identifiers_result result; + ThriftHiveMetastore_grant_role_result result; try { - iface_->get_all_token_identifiers(result.success); + result.success = iface_->grant_role(args.role_name, args.principal_name, args.principal_type, args.grantor, args.grantorType, args.grant_option); result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_all_token_identifiers"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.grant_role"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_all_token_identifiers", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("grant_role", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61316,55 +61942,55 @@ void ThriftHiveMetastoreProcessor::process_get_all_token_identifiers(int32_t seq } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_all_token_identifiers"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.grant_role"); } - oprot->writeMessageBegin("get_all_token_identifiers", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("grant_role", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_all_token_identifiers", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.grant_role", bytes); } } -void ThriftHiveMetastoreProcessor::process_add_master_key(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, 
::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_revoke_role(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_master_key", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.revoke_role", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_master_key"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.revoke_role"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_master_key"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.revoke_role"); } - ThriftHiveMetastore_add_master_key_args args; + ThriftHiveMetastore_revoke_role_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_master_key", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.revoke_role", bytes); } - ThriftHiveMetastore_add_master_key_result result; + ThriftHiveMetastore_revoke_role_result result; try { - result.success = iface_->add_master_key(args.key); + result.success = iface_->revoke_role(args.role_name, args.principal_name, args.principal_type); result.__isset.success = true; } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_master_key"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.revoke_role"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("add_master_key", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("revoke_role", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61373,57 +61999,55 @@ void ThriftHiveMetastoreProcessor::process_add_master_key(int32_t seqid, ::apach } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_master_key"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.revoke_role"); } - oprot->writeMessageBegin("add_master_key", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("revoke_role", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_master_key", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.revoke_role", bytes); } } -void ThriftHiveMetastoreProcessor::process_update_master_key(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_list_roles(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.update_master_key", 
callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.list_roles", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.update_master_key"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.list_roles"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.update_master_key"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.list_roles"); } - ThriftHiveMetastore_update_master_key_args args; + ThriftHiveMetastore_list_roles_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.update_master_key", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.list_roles", bytes); } - ThriftHiveMetastore_update_master_key_result result; + ThriftHiveMetastore_list_roles_result result; try { - iface_->update_master_key(args.seq_number, args.key); - } catch (NoSuchObjectException &o1) { + iface_->list_roles(result.success, args.principal_name, args.principal_type); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (MetaException &o2) { - result.o2 = o2; - result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.update_master_key"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.list_roles"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("update_master_key", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("list_roles", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61432,52 +62056,55 @@ void ThriftHiveMetastoreProcessor::process_update_master_key(int32_t seqid, ::ap } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.update_master_key"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.list_roles"); } - oprot->writeMessageBegin("update_master_key", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("list_roles", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.update_master_key", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.list_roles", bytes); } } -void ThriftHiveMetastoreProcessor::process_remove_master_key(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_grant_revoke_role(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.remove_master_key", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.grant_revoke_role", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.remove_master_key"); + 
::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.grant_revoke_role"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.remove_master_key"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.grant_revoke_role"); } - ThriftHiveMetastore_remove_master_key_args args; + ThriftHiveMetastore_grant_revoke_role_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.remove_master_key", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.grant_revoke_role", bytes); } - ThriftHiveMetastore_remove_master_key_result result; + ThriftHiveMetastore_grant_revoke_role_result result; try { - result.success = iface_->remove_master_key(args.key_seq); + iface_->grant_revoke_role(result.success, args.request); result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.remove_master_key"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.grant_revoke_role"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("remove_master_key", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("grant_revoke_role", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61486,52 +62113,55 @@ void ThriftHiveMetastoreProcessor::process_remove_master_key(int32_t seqid, ::ap } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.remove_master_key"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.grant_revoke_role"); } - oprot->writeMessageBegin("remove_master_key", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("grant_revoke_role", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.remove_master_key", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.grant_revoke_role", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_master_keys(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_principals_in_role(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_master_keys", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_principals_in_role", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_master_keys"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_principals_in_role"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_master_keys"); + this->eventHandler_->preRead(ctx, 
"ThriftHiveMetastore.get_principals_in_role"); } - ThriftHiveMetastore_get_master_keys_args args; + ThriftHiveMetastore_get_principals_in_role_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_master_keys", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_principals_in_role", bytes); } - ThriftHiveMetastore_get_master_keys_result result; + ThriftHiveMetastore_get_principals_in_role_result result; try { - iface_->get_master_keys(result.success); + iface_->get_principals_in_role(result.success, args.request); result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_master_keys"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_principals_in_role"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_master_keys", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_principals_in_role", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61540,52 +62170,55 @@ void ThriftHiveMetastoreProcessor::process_get_master_keys(int32_t seqid, ::apac } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_master_keys"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_principals_in_role"); } - oprot->writeMessageBegin("get_master_keys", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_principals_in_role", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_master_keys", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_principals_in_role", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_open_txns(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_role_grants_for_principal(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_open_txns", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_role_grants_for_principal", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_open_txns"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_role_grants_for_principal"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_open_txns"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_role_grants_for_principal"); } - ThriftHiveMetastore_get_open_txns_args args; + ThriftHiveMetastore_get_role_grants_for_principal_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if 
(this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_open_txns", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_role_grants_for_principal", bytes); } - ThriftHiveMetastore_get_open_txns_result result; + ThriftHiveMetastore_get_role_grants_for_principal_result result; try { - iface_->get_open_txns(result.success); + iface_->get_role_grants_for_principal(result.success, args.request); result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_open_txns"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_role_grants_for_principal"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_open_txns", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_role_grants_for_principal", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61594,52 +62227,55 @@ void ThriftHiveMetastoreProcessor::process_get_open_txns(int32_t seqid, ::apache } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_open_txns"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_role_grants_for_principal"); } - oprot->writeMessageBegin("get_open_txns", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_role_grants_for_principal", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_open_txns", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_role_grants_for_principal", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_open_txns_info(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_privilege_set(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_open_txns_info", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_privilege_set", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_open_txns_info"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_privilege_set"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_open_txns_info"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_privilege_set"); } - ThriftHiveMetastore_get_open_txns_info_args args; + ThriftHiveMetastore_get_privilege_set_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_open_txns_info", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_privilege_set", bytes); } - ThriftHiveMetastore_get_open_txns_info_result 
result; + ThriftHiveMetastore_get_privilege_set_result result; try { - iface_->get_open_txns_info(result.success); + iface_->get_privilege_set(result.success, args.hiveObject, args.user_name, args.group_names); result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_open_txns_info"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_privilege_set"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_open_txns_info", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_privilege_set", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61648,52 +62284,55 @@ void ThriftHiveMetastoreProcessor::process_get_open_txns_info(int32_t seqid, ::a } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_open_txns_info"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_privilege_set"); } - oprot->writeMessageBegin("get_open_txns_info", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_privilege_set", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_open_txns_info", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_privilege_set", bytes); } } -void ThriftHiveMetastoreProcessor::process_open_txns(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_list_privileges(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.open_txns", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.list_privileges", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.open_txns"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.list_privileges"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.open_txns"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.list_privileges"); } - ThriftHiveMetastore_open_txns_args args; + ThriftHiveMetastore_list_privileges_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.open_txns", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.list_privileges", bytes); } - ThriftHiveMetastore_open_txns_result result; + ThriftHiveMetastore_list_privileges_result result; try { - iface_->open_txns(result.success, args.rqst); + iface_->list_privileges(result.success, args.principal_name, args.principal_type, args.hiveObject); result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; } catch (const std::exception& e) 
{ if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.open_txns"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.list_privileges"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("open_txns", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("list_privileges", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61702,54 +62341,55 @@ void ThriftHiveMetastoreProcessor::process_open_txns(int32_t seqid, ::apache::th } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.open_txns"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.list_privileges"); } - oprot->writeMessageBegin("open_txns", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("list_privileges", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.open_txns", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.list_privileges", bytes); } } -void ThriftHiveMetastoreProcessor::process_abort_txn(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_grant_privileges(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.abort_txn", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.grant_privileges", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.abort_txn"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.grant_privileges"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.abort_txn"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.grant_privileges"); } - ThriftHiveMetastore_abort_txn_args args; + ThriftHiveMetastore_grant_privileges_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.abort_txn", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.grant_privileges", bytes); } - ThriftHiveMetastore_abort_txn_result result; + ThriftHiveMetastore_grant_privileges_result result; try { - iface_->abort_txn(args.rqst); - } catch (NoSuchTxnException &o1) { + result.success = iface_->grant_privileges(args.privileges); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.abort_txn"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.grant_privileges"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("abort_txn", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("grant_privileges", 
::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61758,54 +62398,55 @@ void ThriftHiveMetastoreProcessor::process_abort_txn(int32_t seqid, ::apache::th } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.abort_txn"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.grant_privileges"); } - oprot->writeMessageBegin("abort_txn", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("grant_privileges", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.abort_txn", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.grant_privileges", bytes); } } -void ThriftHiveMetastoreProcessor::process_abort_txns(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_revoke_privileges(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.abort_txns", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.revoke_privileges", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.abort_txns"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.revoke_privileges"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.abort_txns"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.revoke_privileges"); } - ThriftHiveMetastore_abort_txns_args args; + ThriftHiveMetastore_revoke_privileges_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.abort_txns", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.revoke_privileges", bytes); } - ThriftHiveMetastore_abort_txns_result result; + ThriftHiveMetastore_revoke_privileges_result result; try { - iface_->abort_txns(args.rqst); - } catch (NoSuchTxnException &o1) { + result.success = iface_->revoke_privileges(args.privileges); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.abort_txns"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.revoke_privileges"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("abort_txns", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("revoke_privileges", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61814,57 +62455,55 @@ void ThriftHiveMetastoreProcessor::process_abort_txns(int32_t seqid, ::apache::t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.abort_txns"); + this->eventHandler_->preWrite(ctx, 
"ThriftHiveMetastore.revoke_privileges"); } - oprot->writeMessageBegin("abort_txns", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("revoke_privileges", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.abort_txns", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.revoke_privileges", bytes); } } -void ThriftHiveMetastoreProcessor::process_commit_txn(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_grant_revoke_privileges(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.commit_txn", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.grant_revoke_privileges", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.commit_txn"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.grant_revoke_privileges"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.commit_txn"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.grant_revoke_privileges"); } - ThriftHiveMetastore_commit_txn_args args; + ThriftHiveMetastore_grant_revoke_privileges_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.commit_txn", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.grant_revoke_privileges", bytes); } - ThriftHiveMetastore_commit_txn_result result; + ThriftHiveMetastore_grant_revoke_privileges_result result; try { - iface_->commit_txn(args.rqst); - } catch (NoSuchTxnException &o1) { + iface_->grant_revoke_privileges(result.success, args.request); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (TxnAbortedException &o2) { - result.o2 = o2; - result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.commit_txn"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.grant_revoke_privileges"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("commit_txn", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("grant_revoke_privileges", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61873,58 +62512,55 @@ void ThriftHiveMetastoreProcessor::process_commit_txn(int32_t seqid, ::apache::t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.commit_txn"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.grant_revoke_privileges"); } - oprot->writeMessageBegin("commit_txn", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("grant_revoke_privileges", ::apache::thrift::protocol::T_REPLY, seqid); 
result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.commit_txn", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.grant_revoke_privileges", bytes); } } -void ThriftHiveMetastoreProcessor::process_lock(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_set_ugi(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.lock", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.set_ugi", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.lock"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.set_ugi"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.lock"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.set_ugi"); } - ThriftHiveMetastore_lock_args args; + ThriftHiveMetastore_set_ugi_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.lock", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.set_ugi", bytes); } - ThriftHiveMetastore_lock_result result; + ThriftHiveMetastore_set_ugi_result result; try { - iface_->lock(result.success, args.rqst); + iface_->set_ugi(result.success, args.user_name, args.group_names); result.__isset.success = true; - } catch (NoSuchTxnException &o1) { + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (TxnAbortedException &o2) { - result.o2 = o2; - result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.lock"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.set_ugi"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("lock", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("set_ugi", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61933,61 +62569,55 @@ void ThriftHiveMetastoreProcessor::process_lock(int32_t seqid, ::apache::thrift: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.lock"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.set_ugi"); } - oprot->writeMessageBegin("lock", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("set_ugi", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.lock", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.set_ugi", bytes); } } -void ThriftHiveMetastoreProcessor::process_check_lock(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, 
::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_delegation_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.check_lock", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_delegation_token", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.check_lock"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_delegation_token"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.check_lock"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_delegation_token"); } - ThriftHiveMetastore_check_lock_args args; + ThriftHiveMetastore_get_delegation_token_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.check_lock", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_delegation_token", bytes); } - ThriftHiveMetastore_check_lock_result result; + ThriftHiveMetastore_get_delegation_token_result result; try { - iface_->check_lock(result.success, args.rqst); + iface_->get_delegation_token(result.success, args.token_owner, args.renewer_kerberos_principal_name); result.__isset.success = true; - } catch (NoSuchTxnException &o1) { + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (TxnAbortedException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (NoSuchLockException &o3) { - result.o3 = o3; - result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.check_lock"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_delegation_token"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("check_lock", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_delegation_token", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -61996,57 +62626,55 @@ void ThriftHiveMetastoreProcessor::process_check_lock(int32_t seqid, ::apache::t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.check_lock"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_delegation_token"); } - oprot->writeMessageBegin("check_lock", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_delegation_token", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.check_lock", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_delegation_token", bytes); } } -void ThriftHiveMetastoreProcessor::process_unlock(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void 
ThriftHiveMetastoreProcessor::process_renew_delegation_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.unlock", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.renew_delegation_token", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.unlock"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.renew_delegation_token"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.unlock"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.renew_delegation_token"); } - ThriftHiveMetastore_unlock_args args; + ThriftHiveMetastore_renew_delegation_token_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.unlock", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.renew_delegation_token", bytes); } - ThriftHiveMetastore_unlock_result result; + ThriftHiveMetastore_renew_delegation_token_result result; try { - iface_->unlock(args.rqst); - } catch (NoSuchLockException &o1) { + result.success = iface_->renew_delegation_token(args.token_str_form); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (TxnOpenException &o2) { - result.o2 = o2; - result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.unlock"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.renew_delegation_token"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("unlock", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("renew_delegation_token", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62055,52 +62683,54 @@ void ThriftHiveMetastoreProcessor::process_unlock(int32_t seqid, ::apache::thrif } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.unlock"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.renew_delegation_token"); } - oprot->writeMessageBegin("unlock", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("renew_delegation_token", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.unlock", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.renew_delegation_token", bytes); } } -void ThriftHiveMetastoreProcessor::process_show_locks(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_cancel_delegation_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = 
this->eventHandler_->getContext("ThriftHiveMetastore.show_locks", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.cancel_delegation_token", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.show_locks"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.cancel_delegation_token"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.show_locks"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.cancel_delegation_token"); } - ThriftHiveMetastore_show_locks_args args; + ThriftHiveMetastore_cancel_delegation_token_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.show_locks", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.cancel_delegation_token", bytes); } - ThriftHiveMetastore_show_locks_result result; + ThriftHiveMetastore_cancel_delegation_token_result result; try { - iface_->show_locks(result.success, args.rqst); - result.__isset.success = true; + iface_->cancel_delegation_token(args.token_str_form); + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.show_locks"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.cancel_delegation_token"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("show_locks", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("cancel_delegation_token", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62109,60 +62739,52 @@ void ThriftHiveMetastoreProcessor::process_show_locks(int32_t seqid, ::apache::t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.show_locks"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.cancel_delegation_token"); } - oprot->writeMessageBegin("show_locks", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("cancel_delegation_token", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.show_locks", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.cancel_delegation_token", bytes); } } -void ThriftHiveMetastoreProcessor::process_heartbeat(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_add_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.heartbeat", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_token", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.heartbeat"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), 
ctx, "ThriftHiveMetastore.add_token"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.heartbeat"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_token"); } - ThriftHiveMetastore_heartbeat_args args; + ThriftHiveMetastore_add_token_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.heartbeat", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_token", bytes); } - ThriftHiveMetastore_heartbeat_result result; + ThriftHiveMetastore_add_token_result result; try { - iface_->heartbeat(args.ids); - } catch (NoSuchLockException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (NoSuchTxnException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (TxnAbortedException &o3) { - result.o3 = o3; - result.__isset.o3 = true; + result.success = iface_->add_token(args.token_identifier, args.delegation_token); + result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.heartbeat"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_token"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("add_token", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62171,52 +62793,52 @@ void ThriftHiveMetastoreProcessor::process_heartbeat(int32_t seqid, ::apache::th } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.heartbeat"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_token"); } - oprot->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("add_token", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.heartbeat", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_token", bytes); } } -void ThriftHiveMetastoreProcessor::process_heartbeat_txn_range(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_remove_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.heartbeat_txn_range", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.remove_token", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.heartbeat_txn_range"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.remove_token"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.heartbeat_txn_range"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.remove_token"); } - ThriftHiveMetastore_heartbeat_txn_range_args args; + 
ThriftHiveMetastore_remove_token_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.heartbeat_txn_range", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.remove_token", bytes); } - ThriftHiveMetastore_heartbeat_txn_range_result result; + ThriftHiveMetastore_remove_token_result result; try { - iface_->heartbeat_txn_range(result.success, args.txns); + result.success = iface_->remove_token(args.token_identifier); result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.heartbeat_txn_range"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.remove_token"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("remove_token", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62225,51 +62847,52 @@ void ThriftHiveMetastoreProcessor::process_heartbeat_txn_range(int32_t seqid, :: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.heartbeat_txn_range"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.remove_token"); } - oprot->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("remove_token", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.heartbeat_txn_range", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.remove_token", bytes); } } -void ThriftHiveMetastoreProcessor::process_compact(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.compact", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_token", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.compact"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_token"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.compact"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_token"); } - ThriftHiveMetastore_compact_args args; + ThriftHiveMetastore_get_token_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.compact", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_token", bytes); } - ThriftHiveMetastore_compact_result result; + ThriftHiveMetastore_get_token_result result; try { - iface_->compact(args.rqst); + iface_->get_token(result.success, 
args.token_identifier); + result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.compact"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_token"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("compact", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_token", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62278,52 +62901,52 @@ void ThriftHiveMetastoreProcessor::process_compact(int32_t seqid, ::apache::thri } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.compact"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_token"); } - oprot->writeMessageBegin("compact", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_token", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.compact", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_token", bytes); } } -void ThriftHiveMetastoreProcessor::process_compact2(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_all_token_identifiers(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.compact2", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_all_token_identifiers", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.compact2"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_all_token_identifiers"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.compact2"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_all_token_identifiers"); } - ThriftHiveMetastore_compact2_args args; + ThriftHiveMetastore_get_all_token_identifiers_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.compact2", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_all_token_identifiers", bytes); } - ThriftHiveMetastore_compact2_result result; + ThriftHiveMetastore_get_all_token_identifiers_result result; try { - iface_->compact2(result.success, args.rqst); + iface_->get_all_token_identifiers(result.success); result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.compact2"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_all_token_identifiers"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("compact2", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_all_token_identifiers", 
::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62332,52 +62955,55 @@ void ThriftHiveMetastoreProcessor::process_compact2(int32_t seqid, ::apache::thr } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.compact2"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_all_token_identifiers"); } - oprot->writeMessageBegin("compact2", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_all_token_identifiers", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.compact2", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_all_token_identifiers", bytes); } } -void ThriftHiveMetastoreProcessor::process_show_compact(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_add_master_key(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.show_compact", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_master_key", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.show_compact"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_master_key"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.show_compact"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_master_key"); } - ThriftHiveMetastore_show_compact_args args; + ThriftHiveMetastore_add_master_key_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.show_compact", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_master_key", bytes); } - ThriftHiveMetastore_show_compact_result result; + ThriftHiveMetastore_add_master_key_result result; try { - iface_->show_compact(result.success, args.rqst); + result.success = iface_->add_master_key(args.key); result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.show_compact"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_master_key"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("add_master_key", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62386,57 +63012,57 @@ void ThriftHiveMetastoreProcessor::process_show_compact(int32_t seqid, ::apache: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.show_compact"); + this->eventHandler_->preWrite(ctx, 
"ThriftHiveMetastore.add_master_key"); } - oprot->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("add_master_key", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.show_compact", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_master_key", bytes); } } -void ThriftHiveMetastoreProcessor::process_add_dynamic_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_update_master_key(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_dynamic_partitions", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.update_master_key", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_dynamic_partitions"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.update_master_key"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_dynamic_partitions"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.update_master_key"); } - ThriftHiveMetastore_add_dynamic_partitions_args args; + ThriftHiveMetastore_update_master_key_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_dynamic_partitions", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.update_master_key", bytes); } - ThriftHiveMetastore_add_dynamic_partitions_result result; + ThriftHiveMetastore_update_master_key_result result; try { - iface_->add_dynamic_partitions(args.rqst); - } catch (NoSuchTxnException &o1) { + iface_->update_master_key(args.seq_number, args.key); + } catch (NoSuchObjectException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (TxnAbortedException &o2) { + } catch (MetaException &o2) { result.o2 = o2; result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_dynamic_partitions"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.update_master_key"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("update_master_key", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62445,52 +63071,52 @@ void ThriftHiveMetastoreProcessor::process_add_dynamic_partitions(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_dynamic_partitions"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.update_master_key"); } - oprot->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_REPLY, seqid); + 
oprot->writeMessageBegin("update_master_key", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_dynamic_partitions", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.update_master_key", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_next_notification(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_remove_master_key(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_next_notification", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.remove_master_key", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_next_notification"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.remove_master_key"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_next_notification"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.remove_master_key"); } - ThriftHiveMetastore_get_next_notification_args args; + ThriftHiveMetastore_remove_master_key_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_next_notification", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.remove_master_key", bytes); } - ThriftHiveMetastore_get_next_notification_result result; + ThriftHiveMetastore_remove_master_key_result result; try { - iface_->get_next_notification(result.success, args.rqst); + result.success = iface_->remove_master_key(args.key_seq); result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_next_notification"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.remove_master_key"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("remove_master_key", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62499,52 +63125,52 @@ void ThriftHiveMetastoreProcessor::process_get_next_notification(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_next_notification"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.remove_master_key"); } - oprot->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("remove_master_key", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_next_notification", bytes); + 
this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.remove_master_key", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_current_notificationEventId(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_master_keys(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_current_notificationEventId", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_master_keys", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_current_notificationEventId"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_master_keys"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_current_notificationEventId"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_master_keys"); } - ThriftHiveMetastore_get_current_notificationEventId_args args; + ThriftHiveMetastore_get_master_keys_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_current_notificationEventId", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_master_keys", bytes); } - ThriftHiveMetastore_get_current_notificationEventId_result result; + ThriftHiveMetastore_get_master_keys_result result; try { - iface_->get_current_notificationEventId(result.success); + iface_->get_master_keys(result.success); result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_current_notificationEventId"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_master_keys"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_master_keys", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62553,52 +63179,52 @@ void ThriftHiveMetastoreProcessor::process_get_current_notificationEventId(int32 } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_current_notificationEventId"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_master_keys"); } - oprot->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_master_keys", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_current_notificationEventId", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_master_keys", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_notification_events_count(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, 
void* callContext) +void ThriftHiveMetastoreProcessor::process_get_open_txns(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_notification_events_count", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_open_txns", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_notification_events_count"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_open_txns"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_notification_events_count"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_open_txns"); } - ThriftHiveMetastore_get_notification_events_count_args args; + ThriftHiveMetastore_get_open_txns_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_notification_events_count", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_open_txns", bytes); } - ThriftHiveMetastore_get_notification_events_count_result result; + ThriftHiveMetastore_get_open_txns_result result; try { - iface_->get_notification_events_count(result.success, args.rqst); + iface_->get_open_txns(result.success); result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_notification_events_count"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_open_txns"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_notification_events_count", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_open_txns", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62607,52 +63233,52 @@ void ThriftHiveMetastoreProcessor::process_get_notification_events_count(int32_t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_notification_events_count"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_open_txns"); } - oprot->writeMessageBegin("get_notification_events_count", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_open_txns", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_notification_events_count", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_open_txns", bytes); } } -void ThriftHiveMetastoreProcessor::process_fire_listener_event(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_open_txns_info(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = 
this->eventHandler_->getContext("ThriftHiveMetastore.fire_listener_event", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_open_txns_info", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.fire_listener_event"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_open_txns_info"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.fire_listener_event"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_open_txns_info"); } - ThriftHiveMetastore_fire_listener_event_args args; + ThriftHiveMetastore_get_open_txns_info_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.fire_listener_event", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_open_txns_info", bytes); } - ThriftHiveMetastore_fire_listener_event_result result; + ThriftHiveMetastore_get_open_txns_info_result result; try { - iface_->fire_listener_event(result.success, args.rqst); + iface_->get_open_txns_info(result.success); result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.fire_listener_event"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_open_txns_info"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_open_txns_info", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62661,51 +63287,52 @@ void ThriftHiveMetastoreProcessor::process_fire_listener_event(int32_t seqid, :: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.fire_listener_event"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_open_txns_info"); } - oprot->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_open_txns_info", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.fire_listener_event", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_open_txns_info", bytes); } } -void ThriftHiveMetastoreProcessor::process_flushCache(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_open_txns(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.flushCache", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.open_txns", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.flushCache"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, 
"ThriftHiveMetastore.open_txns"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.flushCache"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.open_txns"); } - ThriftHiveMetastore_flushCache_args args; + ThriftHiveMetastore_open_txns_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.flushCache", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.open_txns", bytes); } - ThriftHiveMetastore_flushCache_result result; + ThriftHiveMetastore_open_txns_result result; try { - iface_->flushCache(); + iface_->open_txns(result.success, args.rqst); + result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.flushCache"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.open_txns"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("open_txns", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62714,55 +63341,54 @@ void ThriftHiveMetastoreProcessor::process_flushCache(int32_t seqid, ::apache::t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.flushCache"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.open_txns"); } - oprot->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("open_txns", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.flushCache", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.open_txns", bytes); } } -void ThriftHiveMetastoreProcessor::process_cm_recycle(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_abort_txn(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.cm_recycle", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.abort_txn", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.cm_recycle"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.abort_txn"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.cm_recycle"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.abort_txn"); } - ThriftHiveMetastore_cm_recycle_args args; + ThriftHiveMetastore_abort_txn_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.cm_recycle", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.abort_txn", bytes); } - 
ThriftHiveMetastore_cm_recycle_result result; + ThriftHiveMetastore_abort_txn_result result; try { - iface_->cm_recycle(result.success, args.request); - result.__isset.success = true; - } catch (MetaException &o1) { + iface_->abort_txn(args.rqst); + } catch (NoSuchTxnException &o1) { result.o1 = o1; result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.cm_recycle"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.abort_txn"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("cm_recycle", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("abort_txn", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62771,52 +63397,54 @@ void ThriftHiveMetastoreProcessor::process_cm_recycle(int32_t seqid, ::apache::t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.cm_recycle"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.abort_txn"); } - oprot->writeMessageBegin("cm_recycle", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("abort_txn", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.cm_recycle", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.abort_txn", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_file_metadata_by_expr(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_abort_txns(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_file_metadata_by_expr", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.abort_txns", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.abort_txns"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.abort_txns"); } - ThriftHiveMetastore_get_file_metadata_by_expr_args args; + ThriftHiveMetastore_abort_txns_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.abort_txns", bytes); } - ThriftHiveMetastore_get_file_metadata_by_expr_result result; + ThriftHiveMetastore_abort_txns_result result; try { - iface_->get_file_metadata_by_expr(result.success, args.req); - result.__isset.success = true; + iface_->abort_txns(args.rqst); + } catch (NoSuchTxnException &o1) { + result.o1 = o1; + result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { 
- this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.abort_txns"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("abort_txns", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62825,52 +63453,57 @@ void ThriftHiveMetastoreProcessor::process_get_file_metadata_by_expr(int32_t seq } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.abort_txns"); } - oprot->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("abort_txns", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.abort_txns", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_commit_txn(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_file_metadata", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.commit_txn", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_file_metadata"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.commit_txn"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_file_metadata"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.commit_txn"); } - ThriftHiveMetastore_get_file_metadata_args args; + ThriftHiveMetastore_commit_txn_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_file_metadata", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.commit_txn", bytes); } - ThriftHiveMetastore_get_file_metadata_result result; + ThriftHiveMetastore_commit_txn_result result; try { - iface_->get_file_metadata(result.success, args.req); - result.__isset.success = true; + iface_->commit_txn(args.rqst); + } catch (NoSuchTxnException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (TxnAbortedException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_file_metadata"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.commit_txn"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, 
seqid); + oprot->writeMessageBegin("commit_txn", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62879,52 +63512,58 @@ void ThriftHiveMetastoreProcessor::process_get_file_metadata(int32_t seqid, ::ap } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_file_metadata"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.commit_txn"); } - oprot->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("commit_txn", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_file_metadata", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.commit_txn", bytes); } } -void ThriftHiveMetastoreProcessor::process_put_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_lock(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.put_file_metadata", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.lock", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.put_file_metadata"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.lock"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.put_file_metadata"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.lock"); } - ThriftHiveMetastore_put_file_metadata_args args; + ThriftHiveMetastore_lock_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.put_file_metadata", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.lock", bytes); } - ThriftHiveMetastore_put_file_metadata_result result; + ThriftHiveMetastore_lock_result result; try { - iface_->put_file_metadata(result.success, args.req); + iface_->lock(result.success, args.rqst); result.__isset.success = true; + } catch (NoSuchTxnException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (TxnAbortedException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.put_file_metadata"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.lock"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("lock", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62933,52 +63572,61 @@ void ThriftHiveMetastoreProcessor::process_put_file_metadata(int32_t seqid, ::ap } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, 
"ThriftHiveMetastore.put_file_metadata"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.lock"); } - oprot->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("lock", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.put_file_metadata", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.lock", bytes); } } -void ThriftHiveMetastoreProcessor::process_clear_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_check_lock(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.clear_file_metadata", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.check_lock", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.clear_file_metadata"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.check_lock"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.clear_file_metadata"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.check_lock"); } - ThriftHiveMetastore_clear_file_metadata_args args; + ThriftHiveMetastore_check_lock_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.clear_file_metadata", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.check_lock", bytes); } - ThriftHiveMetastore_clear_file_metadata_result result; + ThriftHiveMetastore_check_lock_result result; try { - iface_->clear_file_metadata(result.success, args.req); + iface_->check_lock(result.success, args.rqst); result.__isset.success = true; + } catch (NoSuchTxnException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (TxnAbortedException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (NoSuchLockException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.clear_file_metadata"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.check_lock"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("check_lock", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -62987,52 +63635,57 @@ void ThriftHiveMetastoreProcessor::process_clear_file_metadata(int32_t seqid, :: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.clear_file_metadata"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.check_lock"); } - oprot->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + 
oprot->writeMessageBegin("check_lock", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.clear_file_metadata", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.check_lock", bytes); } } -void ThriftHiveMetastoreProcessor::process_cache_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_unlock(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.cache_file_metadata", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.unlock", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.cache_file_metadata"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.unlock"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.cache_file_metadata"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.unlock"); } - ThriftHiveMetastore_cache_file_metadata_args args; + ThriftHiveMetastore_unlock_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.cache_file_metadata", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.unlock", bytes); } - ThriftHiveMetastore_cache_file_metadata_result result; + ThriftHiveMetastore_unlock_result result; try { - iface_->cache_file_metadata(result.success, args.req); - result.__isset.success = true; + iface_->unlock(args.rqst); + } catch (NoSuchLockException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (TxnOpenException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.cache_file_metadata"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.unlock"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("unlock", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -63041,55 +63694,52 @@ void ThriftHiveMetastoreProcessor::process_cache_file_metadata(int32_t seqid, :: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.cache_file_metadata"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.unlock"); } - oprot->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("unlock", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.cache_file_metadata", bytes); + 
this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.unlock", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_metastore_db_uuid(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_show_locks(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_metastore_db_uuid", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.show_locks", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_metastore_db_uuid"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.show_locks"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_metastore_db_uuid"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.show_locks"); } - ThriftHiveMetastore_get_metastore_db_uuid_args args; + ThriftHiveMetastore_show_locks_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_metastore_db_uuid", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.show_locks", bytes); } - ThriftHiveMetastore_get_metastore_db_uuid_result result; + ThriftHiveMetastore_show_locks_result result; try { - iface_->get_metastore_db_uuid(result.success); + iface_->show_locks(result.success, args.rqst); result.__isset.success = true; - } catch (MetaException &o1) { - result.o1 = o1; - result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_metastore_db_uuid"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.show_locks"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_metastore_db_uuid", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("show_locks", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -63098,61 +63748,60 @@ void ThriftHiveMetastoreProcessor::process_get_metastore_db_uuid(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_metastore_db_uuid"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.show_locks"); } - oprot->writeMessageBegin("get_metastore_db_uuid", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("show_locks", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_metastore_db_uuid", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.show_locks", bytes); } } -void ThriftHiveMetastoreProcessor::process_create_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_heartbeat(int32_t seqid, ::apache::thrift::protocol::TProtocol* 
iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_resource_plan", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.heartbeat", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_resource_plan"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.heartbeat"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_resource_plan"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.heartbeat"); } - ThriftHiveMetastore_create_resource_plan_args args; + ThriftHiveMetastore_heartbeat_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_resource_plan", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.heartbeat", bytes); } - ThriftHiveMetastore_create_resource_plan_result result; + ThriftHiveMetastore_heartbeat_result result; try { - iface_->create_resource_plan(result.success, args.request); - result.__isset.success = true; - } catch (AlreadyExistsException &o1) { + iface_->heartbeat(args.ids); + } catch (NoSuchLockException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (InvalidObjectException &o2) { + } catch (NoSuchTxnException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (MetaException &o3) { + } catch (TxnAbortedException &o3) { result.o3 = o3; result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_resource_plan"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.heartbeat"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("create_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -63161,58 +63810,52 @@ void ThriftHiveMetastoreProcessor::process_create_resource_plan(int32_t seqid, : } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_resource_plan"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.heartbeat"); } - oprot->writeMessageBegin("create_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_resource_plan", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.heartbeat", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_heartbeat_txn_range(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() 
!= NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_resource_plan", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.heartbeat_txn_range", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_resource_plan"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.heartbeat_txn_range"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_resource_plan"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.heartbeat_txn_range"); } - ThriftHiveMetastore_get_resource_plan_args args; + ThriftHiveMetastore_heartbeat_txn_range_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_resource_plan", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.heartbeat_txn_range", bytes); } - ThriftHiveMetastore_get_resource_plan_result result; + ThriftHiveMetastore_heartbeat_txn_range_result result; try { - iface_->get_resource_plan(result.success, args.request); + iface_->heartbeat_txn_range(result.success, args.txns); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (MetaException &o2) { - result.o2 = o2; - result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_resource_plan"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.heartbeat_txn_range"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -63221,55 +63864,51 @@ void ThriftHiveMetastoreProcessor::process_get_resource_plan(int32_t seqid, ::ap } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_resource_plan"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.heartbeat_txn_range"); } - oprot->writeMessageBegin("get_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_resource_plan", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.heartbeat_txn_range", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_active_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_compact(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_active_resource_plan", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.compact", callContext); } - 
::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_active_resource_plan"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.compact"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_active_resource_plan"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.compact"); } - ThriftHiveMetastore_get_active_resource_plan_args args; + ThriftHiveMetastore_compact_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_active_resource_plan", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.compact", bytes); } - ThriftHiveMetastore_get_active_resource_plan_result result; + ThriftHiveMetastore_compact_result result; try { - iface_->get_active_resource_plan(result.success, args.request); - result.__isset.success = true; - } catch (MetaException &o2) { - result.o2 = o2; - result.__isset.o2 = true; + iface_->compact(args.rqst); } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_active_resource_plan"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.compact"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_active_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("compact", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -63278,55 +63917,52 @@ void ThriftHiveMetastoreProcessor::process_get_active_resource_plan(int32_t seqi } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_active_resource_plan"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.compact"); } - oprot->writeMessageBegin("get_active_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("compact", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_active_resource_plan", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.compact", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_all_resource_plans(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_compact2(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_all_resource_plans", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.compact2", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_all_resource_plans"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.compact2"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_all_resource_plans"); + 
this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.compact2"); } - ThriftHiveMetastore_get_all_resource_plans_args args; + ThriftHiveMetastore_compact2_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_all_resource_plans", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.compact2", bytes); } - ThriftHiveMetastore_get_all_resource_plans_result result; + ThriftHiveMetastore_compact2_result result; try { - iface_->get_all_resource_plans(result.success, args.request); + iface_->compact2(result.success, args.rqst); result.__isset.success = true; - } catch (MetaException &o1) { - result.o1 = o1; - result.__isset.o1 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_all_resource_plans"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.compact2"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_all_resource_plans", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("compact2", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -63335,61 +63971,111 @@ void ThriftHiveMetastoreProcessor::process_get_all_resource_plans(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_all_resource_plans"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.compact2"); } - oprot->writeMessageBegin("get_all_resource_plans", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("compact2", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_all_resource_plans", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.compact2", bytes); } } -void ThriftHiveMetastoreProcessor::process_alter_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_show_compact(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_resource_plan", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.show_compact", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_resource_plan"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.show_compact"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_resource_plan"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.show_compact"); } - ThriftHiveMetastore_alter_resource_plan_args args; + ThriftHiveMetastore_show_compact_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, 
"ThriftHiveMetastore.alter_resource_plan", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.show_compact", bytes); } - ThriftHiveMetastore_alter_resource_plan_result result; + ThriftHiveMetastore_show_compact_result result; try { - iface_->alter_resource_plan(result.success, args.request); + iface_->show_compact(result.success, args.rqst); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.show_compact"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.show_compact"); + } + + oprot->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.show_compact", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_add_dynamic_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_dynamic_partitions", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_dynamic_partitions"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_dynamic_partitions"); + } + + ThriftHiveMetastore_add_dynamic_partitions_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_dynamic_partitions", bytes); + } + + ThriftHiveMetastore_add_dynamic_partitions_result result; + try { + iface_->add_dynamic_partitions(args.rqst); + } catch (NoSuchTxnException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (InvalidOperationException &o2) { + } catch (TxnAbortedException &o2) { result.o2 = o2; result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_resource_plan"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_dynamic_partitions"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("alter_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -63398,58 +64084,52 @@ void ThriftHiveMetastoreProcessor::process_alter_resource_plan(int32_t seqid, :: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_resource_plan"); + this->eventHandler_->preWrite(ctx, 
"ThriftHiveMetastore.add_dynamic_partitions"); } - oprot->writeMessageBegin("alter_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_resource_plan", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_dynamic_partitions", bytes); } } -void ThriftHiveMetastoreProcessor::process_validate_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_next_notification(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.validate_resource_plan", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_next_notification", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.validate_resource_plan"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_next_notification"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.validate_resource_plan"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_next_notification"); } - ThriftHiveMetastore_validate_resource_plan_args args; + ThriftHiveMetastore_get_next_notification_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.validate_resource_plan", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_next_notification", bytes); } - ThriftHiveMetastore_validate_resource_plan_result result; + ThriftHiveMetastore_get_next_notification_result result; try { - iface_->validate_resource_plan(result.success, args.request); + iface_->get_next_notification(result.success, args.rqst); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (MetaException &o2) { - result.o2 = o2; - result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.validate_resource_plan"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_next_notification"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("validate_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -63458,61 +64138,52 @@ void ThriftHiveMetastoreProcessor::process_validate_resource_plan(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.validate_resource_plan"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_next_notification"); } - oprot->writeMessageBegin("validate_resource_plan", 
::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.validate_resource_plan", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_next_notification", bytes); } } -void ThriftHiveMetastoreProcessor::process_drop_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_current_notificationEventId(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_resource_plan", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_current_notificationEventId", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_resource_plan"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_current_notificationEventId"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_resource_plan"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_current_notificationEventId"); } - ThriftHiveMetastore_drop_resource_plan_args args; + ThriftHiveMetastore_get_current_notificationEventId_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_resource_plan", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_current_notificationEventId", bytes); } - ThriftHiveMetastore_drop_resource_plan_result result; + ThriftHiveMetastore_get_current_notificationEventId_result result; try { - iface_->drop_resource_plan(result.success, args.request); + iface_->get_current_notificationEventId(result.success); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (InvalidOperationException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_resource_plan"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_current_notificationEventId"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("drop_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -63521,64 +64192,52 @@ void ThriftHiveMetastoreProcessor::process_drop_resource_plan(int32_t seqid, ::a } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_resource_plan"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_current_notificationEventId"); } - 
oprot->writeMessageBegin("drop_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_resource_plan", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_current_notificationEventId", bytes); } } -void ThriftHiveMetastoreProcessor::process_create_wm_trigger(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_notification_events_count(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_wm_trigger", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_notification_events_count", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_wm_trigger"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_notification_events_count"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_wm_trigger"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_notification_events_count"); } - ThriftHiveMetastore_create_wm_trigger_args args; + ThriftHiveMetastore_get_notification_events_count_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_wm_trigger", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_notification_events_count", bytes); } - ThriftHiveMetastore_create_wm_trigger_result result; + ThriftHiveMetastore_get_notification_events_count_result result; try { - iface_->create_wm_trigger(result.success, args.request); + iface_->get_notification_events_count(result.success, args.rqst); result.__isset.success = true; - } catch (AlreadyExistsException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (NoSuchObjectException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (InvalidObjectException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (MetaException &o4) { - result.o4 = o4; - result.__isset.o4 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_wm_trigger"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_notification_events_count"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("create_wm_trigger", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_notification_events_count", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -63587,227 +64246,2238 @@ void ThriftHiveMetastoreProcessor::process_create_wm_trigger(int32_t seqid, ::ap } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, 
"ThriftHiveMetastore.create_wm_trigger"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_notification_events_count"); } - oprot->writeMessageBegin("create_wm_trigger", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_notification_events_count", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_wm_trigger", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_notification_events_count", bytes); } } -void ThriftHiveMetastoreProcessor::process_alter_wm_trigger(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_fire_listener_event(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_wm_trigger", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.fire_listener_event", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_wm_trigger"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.fire_listener_event"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_wm_trigger"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.fire_listener_event"); } - ThriftHiveMetastore_alter_wm_trigger_args args; + ThriftHiveMetastore_fire_listener_event_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_wm_trigger", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.fire_listener_event", bytes); } - ThriftHiveMetastore_alter_wm_trigger_result result; + ThriftHiveMetastore_fire_listener_event_result result; try { - iface_->alter_wm_trigger(result.success, args.request); + iface_->fire_listener_event(result.success, args.rqst); result.__isset.success = true; - } catch (NoSuchObjectException &o1) { + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.fire_listener_event"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.fire_listener_event"); + } + + oprot->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.fire_listener_event", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_flushCache(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, 
::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.flushCache", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.flushCache"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.flushCache"); + } + + ThriftHiveMetastore_flushCache_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.flushCache", bytes); + } + + ThriftHiveMetastore_flushCache_result result; + try { + iface_->flushCache(); + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.flushCache"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.flushCache"); + } + + oprot->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.flushCache", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_cm_recycle(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.cm_recycle", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.cm_recycle"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.cm_recycle"); + } + + ThriftHiveMetastore_cm_recycle_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.cm_recycle", bytes); + } + + ThriftHiveMetastore_cm_recycle_result result; + try { + iface_->cm_recycle(result.success, args.request); + result.__isset.success = true; + } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; - } catch (InvalidObjectException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_wm_trigger"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.cm_recycle"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("cm_recycle", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + 
this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.cm_recycle"); + } + + oprot->writeMessageBegin("cm_recycle", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.cm_recycle", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_file_metadata_by_expr(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_file_metadata_by_expr", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + } + + ThriftHiveMetastore_get_file_metadata_by_expr_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr", bytes); + } + + ThriftHiveMetastore_get_file_metadata_by_expr_result result; + try { + iface_->get_file_metadata_by_expr(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + } + + oprot->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_file_metadata", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_file_metadata"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_file_metadata"); + } + + ThriftHiveMetastore_get_file_metadata_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_file_metadata", bytes); + } + + ThriftHiveMetastore_get_file_metadata_result result; + try { + iface_->get_file_metadata(result.success, args.req); + 
result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_file_metadata"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_file_metadata"); + } + + oprot->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_file_metadata", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_put_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.put_file_metadata", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.put_file_metadata"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.put_file_metadata"); + } + + ThriftHiveMetastore_put_file_metadata_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.put_file_metadata", bytes); + } + + ThriftHiveMetastore_put_file_metadata_result result; + try { + iface_->put_file_metadata(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.put_file_metadata"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.put_file_metadata"); + } + + oprot->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.put_file_metadata", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_clear_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.clear_file_metadata", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.clear_file_metadata"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, 
"ThriftHiveMetastore.clear_file_metadata"); + } + + ThriftHiveMetastore_clear_file_metadata_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.clear_file_metadata", bytes); + } + + ThriftHiveMetastore_clear_file_metadata_result result; + try { + iface_->clear_file_metadata(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.clear_file_metadata"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.clear_file_metadata"); + } + + oprot->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.clear_file_metadata", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_cache_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.cache_file_metadata", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.cache_file_metadata"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.cache_file_metadata"); + } + + ThriftHiveMetastore_cache_file_metadata_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.cache_file_metadata", bytes); + } + + ThriftHiveMetastore_cache_file_metadata_result result; + try { + iface_->cache_file_metadata(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.cache_file_metadata"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.cache_file_metadata"); + } + + oprot->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.cache_file_metadata", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_metastore_db_uuid(int32_t seqid, 
::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_metastore_db_uuid", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_metastore_db_uuid"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_metastore_db_uuid"); + } + + ThriftHiveMetastore_get_metastore_db_uuid_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_metastore_db_uuid", bytes); + } + + ThriftHiveMetastore_get_metastore_db_uuid_result result; + try { + iface_->get_metastore_db_uuid(result.success); + result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_metastore_db_uuid"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_metastore_db_uuid", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_metastore_db_uuid"); + } + + oprot->writeMessageBegin("get_metastore_db_uuid", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_metastore_db_uuid", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_create_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_resource_plan", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_resource_plan"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_resource_plan"); + } + + ThriftHiveMetastore_create_resource_plan_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_resource_plan", bytes); + } + + ThriftHiveMetastore_create_resource_plan_result result; + try { + iface_->create_resource_plan(result.success, args.request); + result.__isset.success = true; + } catch (AlreadyExistsException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_resource_plan"); + } + + 
::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("create_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_resource_plan"); + } + + oprot->writeMessageBegin("create_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_resource_plan", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_resource_plan", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_resource_plan"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_resource_plan"); + } + + ThriftHiveMetastore_get_resource_plan_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_resource_plan", bytes); + } + + ThriftHiveMetastore_get_resource_plan_result result; + try { + iface_->get_resource_plan(result.success, args.request); + result.__isset.success = true; + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_resource_plan"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_resource_plan"); + } + + oprot->writeMessageBegin("get_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_resource_plan", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_active_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_active_resource_plan", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_active_resource_plan"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, 
"ThriftHiveMetastore.get_active_resource_plan"); + } + + ThriftHiveMetastore_get_active_resource_plan_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_active_resource_plan", bytes); + } + + ThriftHiveMetastore_get_active_resource_plan_result result; + try { + iface_->get_active_resource_plan(result.success, args.request); + result.__isset.success = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_active_resource_plan"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_active_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_active_resource_plan"); + } + + oprot->writeMessageBegin("get_active_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_active_resource_plan", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_all_resource_plans(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_all_resource_plans", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_all_resource_plans"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_all_resource_plans"); + } + + ThriftHiveMetastore_get_all_resource_plans_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_all_resource_plans", bytes); + } + + ThriftHiveMetastore_get_all_resource_plans_result result; + try { + iface_->get_all_resource_plans(result.success, args.request); + result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_all_resource_plans"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_all_resource_plans", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_all_resource_plans"); + } + + oprot->writeMessageBegin("get_all_resource_plans", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + 
oprot->getTransport()->flush();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_all_resource_plans", bytes);
+  }
+}
+
+void ThriftHiveMetastoreProcessor::process_alter_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+  void* ctx = NULL;
+  if (this->eventHandler_.get() != NULL) {
+    ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_resource_plan", callContext);
+  }
+  ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_resource_plan");
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_resource_plan");
+  }
+
+  ThriftHiveMetastore_alter_resource_plan_args args;
+  args.read(iprot);
+  iprot->readMessageEnd();
+  uint32_t bytes = iprot->getTransport()->readEnd();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_resource_plan", bytes);
+  }
+
+  ThriftHiveMetastore_alter_resource_plan_result result;
+  try {
+    iface_->alter_resource_plan(result.success, args.request);
+    result.__isset.success = true;
+  } catch (NoSuchObjectException &o1) {
+    result.o1 = o1;
+    result.__isset.o1 = true;
+  } catch (InvalidOperationException &o2) {
+    result.o2 = o2;
+    result.__isset.o2 = true;
+  } catch (MetaException &o3) {
+    result.o3 = o3;
+    result.__isset.o3 = true;
+  } catch (const std::exception& e) {
+    if (this->eventHandler_.get() != NULL) {
+      this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_resource_plan");
+    }
+
+    ::apache::thrift::TApplicationException x(e.what());
+    oprot->writeMessageBegin("alter_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+    x.write(oprot);
+    oprot->writeMessageEnd();
+    oprot->getTransport()->writeEnd();
+    oprot->getTransport()->flush();
+    return;
+  }
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_resource_plan");
+  }
+
+  oprot->writeMessageBegin("alter_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid);
+  result.write(oprot);
+  oprot->writeMessageEnd();
+  bytes = oprot->getTransport()->writeEnd();
+  oprot->getTransport()->flush();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_resource_plan", bytes);
+  }
+}
+
+void ThriftHiveMetastoreProcessor::process_validate_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+  void* ctx = NULL;
+  if (this->eventHandler_.get() != NULL) {
+    ctx = this->eventHandler_->getContext("ThriftHiveMetastore.validate_resource_plan", callContext);
+  }
+  ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.validate_resource_plan");
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.validate_resource_plan");
+  }
+
+  ThriftHiveMetastore_validate_resource_plan_args args;
+  args.read(iprot);
+  iprot->readMessageEnd();
+  uint32_t bytes = iprot->getTransport()->readEnd();
+
+  if (this->eventHandler_.get() != NULL) {
+    this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.validate_resource_plan", bytes);
+  }
+
+  ThriftHiveMetastore_validate_resource_plan_result result;
+  try {
+    iface_->validate_resource_plan(result.success, args.request);
+    result.__isset.success = true;
+  } catch
(NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.validate_resource_plan"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("validate_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.validate_resource_plan"); + } + + oprot->writeMessageBegin("validate_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.validate_resource_plan", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_drop_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_resource_plan", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_resource_plan"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_resource_plan"); + } + + ThriftHiveMetastore_drop_resource_plan_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_resource_plan", bytes); + } + + ThriftHiveMetastore_drop_resource_plan_result result; + try { + iface_->drop_resource_plan(result.success, args.request); + result.__isset.success = true; + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidOperationException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_resource_plan"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("drop_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_resource_plan"); + } + + oprot->writeMessageBegin("drop_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_resource_plan", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_create_wm_trigger(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* 
oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_wm_trigger", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_wm_trigger"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_wm_trigger"); + } + + ThriftHiveMetastore_create_wm_trigger_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_wm_trigger", bytes); + } + + ThriftHiveMetastore_create_wm_trigger_result result; + try { + iface_->create_wm_trigger(result.success, args.request); + result.__isset.success = true; + } catch (AlreadyExistsException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (InvalidObjectException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (MetaException &o4) { + result.o4 = o4; + result.__isset.o4 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_wm_trigger"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("create_wm_trigger", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_wm_trigger"); + } + + oprot->writeMessageBegin("create_wm_trigger", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_wm_trigger", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_alter_wm_trigger(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_wm_trigger", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_wm_trigger"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_wm_trigger"); + } + + ThriftHiveMetastore_alter_wm_trigger_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_wm_trigger", bytes); + } + + ThriftHiveMetastore_alter_wm_trigger_result result; + try { + iface_->alter_wm_trigger(result.success, args.request); + result.__isset.success = true; + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + 
this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_wm_trigger"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("alter_wm_trigger", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_wm_trigger"); + } + + oprot->writeMessageBegin("alter_wm_trigger", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_wm_trigger", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_drop_wm_trigger(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_wm_trigger", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_wm_trigger"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_wm_trigger"); + } + + ThriftHiveMetastore_drop_wm_trigger_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_wm_trigger", bytes); + } + + ThriftHiveMetastore_drop_wm_trigger_result result; + try { + iface_->drop_wm_trigger(result.success, args.request); + result.__isset.success = true; + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidOperationException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_wm_trigger"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("drop_wm_trigger", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_wm_trigger"); + } + + oprot->writeMessageBegin("drop_wm_trigger", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_wm_trigger", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_triggers_for_resourceplan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_triggers_for_resourceplan", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, 
"ThriftHiveMetastore.get_triggers_for_resourceplan"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_triggers_for_resourceplan"); + } + + ThriftHiveMetastore_get_triggers_for_resourceplan_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_triggers_for_resourceplan", bytes); + } + + ThriftHiveMetastore_get_triggers_for_resourceplan_result result; + try { + iface_->get_triggers_for_resourceplan(result.success, args.request); + result.__isset.success = true; + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_triggers_for_resourceplan"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_triggers_for_resourceplan", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_triggers_for_resourceplan"); + } + + oprot->writeMessageBegin("get_triggers_for_resourceplan", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_triggers_for_resourceplan", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_create_or_update_wm_pool(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_or_update_wm_pool", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_or_update_wm_pool"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_or_update_wm_pool"); + } + + ThriftHiveMetastore_create_or_update_wm_pool_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_or_update_wm_pool", bytes); + } + + ThriftHiveMetastore_create_or_update_wm_pool_result result; + try { + iface_->create_or_update_wm_pool(result.success, args.request); + result.__isset.success = true; + } catch (AlreadyExistsException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (InvalidObjectException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (MetaException &o4) { + result.o4 = o4; + result.__isset.o4 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_or_update_wm_pool"); + } + + ::apache::thrift::TApplicationException x(e.what()); + 
oprot->writeMessageBegin("create_or_update_wm_pool", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_or_update_wm_pool"); + } + + oprot->writeMessageBegin("create_or_update_wm_pool", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_or_update_wm_pool", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_drop_wm_pool(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_wm_pool", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_wm_pool"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_wm_pool"); + } + + ThriftHiveMetastore_drop_wm_pool_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_wm_pool", bytes); + } + + ThriftHiveMetastore_drop_wm_pool_result result; + try { + iface_->drop_wm_pool(result.success, args.request); + result.__isset.success = true; + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidOperationException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_wm_pool"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("drop_wm_pool", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_wm_pool"); + } + + oprot->writeMessageBegin("drop_wm_pool", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_wm_pool", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_create_or_update_wm_mapping(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_or_update_wm_mapping", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_or_update_wm_mapping"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, 
"ThriftHiveMetastore.create_or_update_wm_mapping"); + } + + ThriftHiveMetastore_create_or_update_wm_mapping_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_or_update_wm_mapping", bytes); + } + + ThriftHiveMetastore_create_or_update_wm_mapping_result result; + try { + iface_->create_or_update_wm_mapping(result.success, args.request); + result.__isset.success = true; + } catch (AlreadyExistsException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (InvalidObjectException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (MetaException &o4) { + result.o4 = o4; + result.__isset.o4 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_or_update_wm_mapping"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("create_or_update_wm_mapping", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_or_update_wm_mapping"); + } + + oprot->writeMessageBegin("create_or_update_wm_mapping", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_or_update_wm_mapping", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_drop_wm_mapping(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_wm_mapping", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_wm_mapping"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_wm_mapping"); + } + + ThriftHiveMetastore_drop_wm_mapping_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_wm_mapping", bytes); + } + + ThriftHiveMetastore_drop_wm_mapping_result result; + try { + iface_->drop_wm_mapping(result.success, args.request); + result.__isset.success = true; + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidOperationException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_wm_mapping"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("drop_wm_mapping", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + 
oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_wm_mapping"); + } + + oprot->writeMessageBegin("drop_wm_mapping", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_wm_mapping", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_create_or_drop_wm_trigger_to_pool_mapping(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_or_drop_wm_trigger_to_pool_mapping", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_or_drop_wm_trigger_to_pool_mapping"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_or_drop_wm_trigger_to_pool_mapping"); + } + + ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_or_drop_wm_trigger_to_pool_mapping", bytes); + } + + ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result result; + try { + iface_->create_or_drop_wm_trigger_to_pool_mapping(result.success, args.request); + result.__isset.success = true; + } catch (AlreadyExistsException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (InvalidObjectException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (MetaException &o4) { + result.o4 = o4; + result.__isset.o4 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_or_drop_wm_trigger_to_pool_mapping"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("create_or_drop_wm_trigger_to_pool_mapping", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_or_drop_wm_trigger_to_pool_mapping"); + } + + oprot->writeMessageBegin("create_or_drop_wm_trigger_to_pool_mapping", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_or_drop_wm_trigger_to_pool_mapping", bytes); + } +} + +::boost::shared_ptr< ::apache::thrift::TProcessor > ThriftHiveMetastoreProcessorFactory::getProcessor(const ::apache::thrift::TConnectionInfo& connInfo) { + ::apache::thrift::ReleaseHandler< ThriftHiveMetastoreIfFactory > cleanup(handlerFactory_); + ::boost::shared_ptr< ThriftHiveMetastoreIf > 
handler(handlerFactory_->getHandler(connInfo), cleanup); + ::boost::shared_ptr< ::apache::thrift::TProcessor > processor(new ThriftHiveMetastoreProcessor(handler)); + return processor; +} + +void ThriftHiveMetastoreConcurrentClient::getMetaConf(std::string& _return, const std::string& key) +{ + int32_t seqid = send_getMetaConf(key); + recv_getMetaConf(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_getMetaConf(const std::string& key) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("getMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_getMetaConf_pargs args; + args.key = &key; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("getMetaConf") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_getMetaConf_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getMetaConf failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::setMetaConf(const std::string& key, const std::string& value) +{ + int32_t seqid = send_setMetaConf(key, value); + recv_setMetaConf(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_setMetaConf(const std::string& key, const std::string& value) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("setMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_setMetaConf_pargs args; + args.key = &key; + args.value = &value; + 
args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("setMetaConf") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_setMetaConf_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::create_database(const Database& database) +{ + int32_t seqid = send_create_database(database); + recv_create_database(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_create_database(const Database& database) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("create_database", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_create_database_pargs args; + args.database = &database; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); 
+ } + if (fname.compare("create_database") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_create_database_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::get_database(Database& _return, const std::string& name) +{ + int32_t seqid = send_get_database(name); + recv_get_database(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_database(const std::string& name) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_database", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_database_pargs args; + args.name = &name; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_database") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_database_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_database failed: unknown result"); + } + // seqid != 
rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::drop_database(const std::string& name, const bool deleteData, const bool cascade) +{ + int32_t seqid = send_drop_database(name, deleteData, cascade); + recv_drop_database(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_database(const std::string& name, const bool deleteData, const bool cascade) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_drop_database_pargs args; + args.name = &name; + args.deleteData = &deleteData; + args.cascade = &cascade; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_database") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_drop_database_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::get_databases(std::vector & _return, const std::string& pattern) +{ + int32_t seqid = send_get_databases(pattern); + recv_get_databases(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_databases(const std::string& pattern) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_databases", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_databases_pargs args; + args.pattern = &pattern; + args.write(oprot_); + + 
oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vector & _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_databases") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_databases_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_databases failed: unknown result"); } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("alter_wm_trigger", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_wm_trigger"); - } +void ThriftHiveMetastoreConcurrentClient::get_all_databases(std::vector & _return) +{ + int32_t seqid = send_get_all_databases(); + recv_get_all_databases(_return, seqid); +} - oprot->writeMessageBegin("alter_wm_trigger", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); +int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_databases() +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_all_databases", ::apache::thrift::protocol::T_CALL, cseqid); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_wm_trigger", bytes); - } + ThriftHiveMetastore_get_all_databases_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + 
oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; } -void ThriftHiveMetastoreProcessor::process_drop_wm_trigger(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vector & _return, const int32_t seqid) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_wm_trigger", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_wm_trigger"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_wm_trigger"); - } - ThriftHiveMetastore_drop_wm_trigger_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_wm_trigger", bytes); - } + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - ThriftHiveMetastore_drop_wm_trigger_result result; - try { - iface_->drop_wm_trigger(result.success, args.request); - result.__isset.success = true; - } catch (NoSuchObjectException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (InvalidOperationException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (MetaException &o3) { - result.o3 = o3; - result.__isset.o3 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_wm_trigger"); + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_all_databases") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("drop_wm_trigger", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_all_databases_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_wm_trigger"); - } + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + // in 
a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_databases failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); - oprot->writeMessageBegin("drop_wm_trigger", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_wm_trigger", bytes); - } +void ThriftHiveMetastoreConcurrentClient::alter_database(const std::string& dbname, const Database& db) +{ + int32_t seqid = send_alter_database(dbname, db); + recv_alter_database(seqid); } -void ThriftHiveMetastoreProcessor::process_get_triggers_for_resourceplan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_database(const std::string& dbname, const Database& db) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_triggers_for_resourceplan", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_triggers_for_resourceplan"); + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("alter_database", ::apache::thrift::protocol::T_CALL, cseqid); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_triggers_for_resourceplan"); - } + ThriftHiveMetastore_alter_database_pargs args; + args.dbname = &dbname; + args.db = &db; + args.write(oprot_); - ThriftHiveMetastore_get_triggers_for_resourceplan_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_triggers_for_resourceplan", bytes); - } + sentry.commit(); + return cseqid; +} - ThriftHiveMetastore_get_triggers_for_resourceplan_result result; - try { - iface_->get_triggers_for_resourceplan(result.success, args.request); - result.__isset.success = true; - } catch (NoSuchObjectException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (MetaException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_triggers_for_resourceplan"); - } +void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqid) +{ - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_triggers_for_resourceplan", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, 
"ThriftHiveMetastore.get_triggers_for_resourceplan"); - } + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - oprot->writeMessageBegin("get_triggers_for_resourceplan", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("alter_database") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_triggers_for_resourceplan", bytes); - } -} + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_alter_database_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); -::boost::shared_ptr< ::apache::thrift::TProcessor > ThriftHiveMetastoreProcessorFactory::getProcessor(const ::apache::thrift::TConnectionInfo& connInfo) { - ::apache::thrift::ReleaseHandler< ThriftHiveMetastoreIfFactory > cleanup(handlerFactory_); - ::boost::shared_ptr< ThriftHiveMetastoreIf > handler(handlerFactory_->getHandler(connInfo), cleanup); - ::boost::shared_ptr< ::apache::thrift::TProcessor > processor(new ThriftHiveMetastoreProcessor(handler)); - return processor; + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::getMetaConf(std::string& _return, const std::string& key) +void ThriftHiveMetastoreConcurrentClient::get_type(Type& _return, const std::string& name) { - int32_t seqid = send_getMetaConf(key); - recv_getMetaConf(_return, seqid); + int32_t seqid = send_get_type(name); + recv_get_type(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_getMetaConf(const std::string& key) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_type(const std::string& name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("getMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_type", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_getMetaConf_pargs args; - args.key = &key; + ThriftHiveMetastore_get_type_pargs args; + args.name = &name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63818,7 +66488,7 @@ int32_t 
ThriftHiveMetastoreConcurrentClient::send_getMetaConf(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -63847,7 +66517,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return, iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("getMetaConf") != 0) { + if (fname.compare("get_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63856,7 +66526,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return, using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_getMetaConf_presult result; + ThriftHiveMetastore_get_type_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -63871,8 +66541,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return, sentry.commit(); throw result.o1; } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getMetaConf failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63882,21 +66556,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return, } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::setMetaConf(const std::string& key, const std::string& value) +bool ThriftHiveMetastoreConcurrentClient::create_type(const Type& type) { - int32_t seqid = send_setMetaConf(key, value); - recv_setMetaConf(seqid); + int32_t seqid = send_create_type(type); + return recv_create_type(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_setMetaConf(const std::string& key, const std::string& value) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_type(const Type& type) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("setMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_type", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_setMetaConf_pargs args; - args.key = &key; - args.value = &value; + ThriftHiveMetastore_create_type_pargs args; + args.type = &type; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63907,7 +66580,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_setMetaConf(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) { int32_t rseqid = 0; @@ -63936,7 +66609,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("setMetaConf") != 0) { + if (fname.compare("create_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63945,17 +66618,31 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_setMetaConf_presult result; + bool _return; + ThriftHiveMetastore_create_type_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + sentry.commit(); + return _return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - sentry.commit(); - return; + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_type failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63965,20 +66652,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::create_database(const Database& database) +bool ThriftHiveMetastoreConcurrentClient::drop_type(const std::string& type) { - int32_t seqid = send_create_database(database); - recv_create_database(seqid); + int32_t seqid = send_drop_type(type); + return recv_drop_type(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_database(const Database& database) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_type(const std::string& type) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_database", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_type", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_database_pargs args; - args.database = &database; + ThriftHiveMetastore_drop_type_pargs args; + args.type = &type; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63989,7 +66676,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_database(const Database return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid) { int32_t rseqid = 0; @@ -64018,7 +66705,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_database") != 0) { + if (fname.compare("drop_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64027,11 +66714,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_create_database_presult result; + bool _return; + ThriftHiveMetastore_drop_type_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + sentry.commit(); + return _return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -64040,12 +66733,96 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seq sentry.commit(); throw result.o2; } - if (result.__isset.o3) { + // in a bad state, don't commit + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_type failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::get_type_all(std::map & _return, const std::string& name) +{ + int32_t seqid = send_get_type_all(name); + recv_get_type_all(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_type_all(const std::string& name) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_type_all", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_type_all_pargs args; + args.name = &name; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map & _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); sentry.commit(); - throw result.o3; + throw x; } - sentry.commit(); - return; + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_type_all") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_type_all_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type_all failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64055,20 +66832,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seq } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_database(Database& _return, const std::string& name) +void ThriftHiveMetastoreConcurrentClient::get_fields(std::vector & _return, const std::string& db_name, const std::string& table_name) { - int32_t seqid = send_get_database(name); - recv_get_database(_return, seqid); + int32_t seqid = send_get_fields(db_name, table_name); + recv_get_fields(_return, seqid); } -int32_t 
ThriftHiveMetastoreConcurrentClient::send_get_database(const std::string& name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields(const std::string& db_name, const std::string& table_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_database", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_fields", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_database_pargs args; - args.name = &name; + ThriftHiveMetastore_get_fields_pargs args; + args.db_name = &db_name; + args.table_name = &table_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64079,7 +66857,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_database(const std::string return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -64108,7 +66886,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, c iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_database") != 0) { + if (fname.compare("get_fields") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64117,7 +66895,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, c using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_database_presult result; + ThriftHiveMetastore_get_fields_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -64136,8 +66914,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, c sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_database failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64147,22 +66929,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, c } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_database(const std::string& name, const bool deleteData, const bool cascade) +void ThriftHiveMetastoreConcurrentClient::get_fields_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) { - int32_t seqid = send_drop_database(name, deleteData, cascade); - recv_drop_database(seqid); + int32_t seqid = send_get_fields_with_environment_context(db_name, table_name, environment_context); + recv_get_fields_with_environment_context(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_database(const std::string& name, const bool deleteData, const bool cascade) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); 
::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_fields_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_database_pargs args; - args.name = &name; - args.deleteData = &deleteData; - args.cascade = &cascade; + ThriftHiveMetastore_get_fields_with_environment_context_pargs args; + args.db_name = &db_name; + args.table_name = &table_name; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64173,7 +66955,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_database(const std::strin return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_context(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -64202,7 +66984,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_database") != 0) { + if (fname.compare("get_fields_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64211,11 +66993,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_database_presult result; + ThriftHiveMetastore_get_fields_with_environment_context_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -64228,8 +67016,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid sentry.commit(); throw result.o3; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64239,20 +67027,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_databases(std::vector & _return, const std::string& pattern) +void ThriftHiveMetastoreConcurrentClient::get_schema(std::vector & _return, const std::string& db_name, const std::string& table_name) { - int32_t seqid = send_get_databases(pattern); - recv_get_databases(_return, seqid); + int32_t seqid = send_get_schema(db_name, table_name); + recv_get_schema(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_databases(const std::string& pattern) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema(const std::string& db_name, const std::string& table_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_databases", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_schema", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_databases_pargs args; - 
args.pattern = &pattern; + ThriftHiveMetastore_get_schema_pargs args; + args.db_name = &db_name; + args.table_name = &table_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64263,7 +67052,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_databases(const std::strin return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -64292,7 +67081,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_databases") != 0) { + if (fname.compare("get_schema") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64301,7 +67090,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vectorreadMessageEnd(); @@ -64316,8 +67105,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -64327,19 +67124,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vector & _return) +void ThriftHiveMetastoreConcurrentClient::get_schema_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) { - int32_t seqid = send_get_all_databases(); - recv_get_all_databases(_return, seqid); + int32_t seqid = send_get_schema_with_environment_context(db_name, table_name, environment_context); + recv_get_schema_with_environment_context(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_databases() +int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_all_databases", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_schema_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_databases_pargs args; + ThriftHiveMetastore_get_schema_with_environment_context_pargs args; + args.db_name = &db_name; + args.table_name = &table_name; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64350,7 +67150,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_databases() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_context(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -64379,7 +67179,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_databases") != 0) { + if (fname.compare("get_schema_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64388,7 +67188,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vectorreadMessageEnd(); @@ -64403,8 +67203,16 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -64414,21 +67222,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_database", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_database_pargs args; - args.dbname = &dbname; - args.db = &db; + ThriftHiveMetastore_create_table_pargs args; + args.tbl = &tbl; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64439,7 +67246,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_database(const std::stri return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid) { int32_t rseqid = 0; @@ -64468,7 +67275,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqi iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_database") != 0) { + if (fname.compare("create_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64477,7 +67284,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqi using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_database_presult result; + ThriftHiveMetastore_create_table_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64490,6 +67297,14 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqi sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } sentry.commit(); return; } @@ -64501,20 +67316,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqi } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_type(Type& _return, const std::string& name) +void ThriftHiveMetastoreConcurrentClient::create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context) { - int32_t seqid = send_get_type(name); - recv_get_type(_return, seqid); + int32_t seqid = send_create_table_with_environment_context(tbl, environment_context); + recv_create_table_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_type(const std::string& name) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_type", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_type_pargs args; - args.name = &name; + ThriftHiveMetastore_create_table_with_environment_context_pargs args; + args.tbl = &tbl; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64525,7 +67341,7 @@ int32_t 
ThriftHiveMetastoreConcurrentClient::send_get_type(const std::string& na return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -64554,7 +67370,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_type") != 0) { + if (fname.compare("create_table_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64563,17 +67379,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_type_presult result; - result.success = &_return; + ThriftHiveMetastore_create_table_with_environment_context_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -64582,8 +67392,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type failed: unknown result"); + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64593,20 +67411,24 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::create_type(const Type& type) +void ThriftHiveMetastoreConcurrentClient::create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints) { - int32_t seqid = send_create_type(type); - return recv_create_type(seqid); + int32_t seqid = send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints); + recv_create_table_with_constraints(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_type(const Type& type) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_type", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_table_with_constraints", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_type_pargs args; - args.type = &type; + ThriftHiveMetastore_create_table_with_constraints_pargs args; + args.tbl = &tbl; + args.primaryKeys = &primaryKeys; + args.foreignKeys = &foreignKeys; + args.uniqueConstraints = &uniqueConstraints; + args.notNullConstraints = ¬NullConstraints; args.write(oprot_); 
oprot_->writeMessageEnd(); @@ -64617,7 +67439,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_type(const Type& type) return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(const int32_t seqid) { int32_t rseqid = 0; @@ -64646,7 +67468,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_type") != 0) { + if (fname.compare("create_table_with_constraints") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64655,17 +67477,11 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_create_type_presult result; - result.success = &_return; + ThriftHiveMetastore_create_table_with_constraints_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - sentry.commit(); - return _return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -64678,8 +67494,12 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) sentry.commit(); throw result.o3; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_type failed: unknown result"); + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64689,20 +67509,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_type(const std::string& type) +void ThriftHiveMetastoreConcurrentClient::drop_constraint(const DropConstraintRequest& req) { - int32_t seqid = send_drop_type(type); - return recv_drop_type(seqid); + int32_t seqid = send_drop_constraint(req); + recv_drop_constraint(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_type(const std::string& type) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_constraint(const DropConstraintRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_type", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_constraint", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_type_pargs args; - args.type = &type; + ThriftHiveMetastore_drop_constraint_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64713,7 +67533,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_type(const std::string& t return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seqid) { int32_t rseqid = 0; @@ -64742,7 +67562,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_type") != 0) { + if (fname.compare("drop_constraint") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); 
iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64751,27 +67571,21 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_type_presult result; - result.success = &_return; + ThriftHiveMetastore_drop_constraint_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - sentry.commit(); - return _return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o2) { + if (result.__isset.o3) { sentry.commit(); - throw result.o2; + throw result.o3; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_type failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64781,20 +67595,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_type_all(std::map & _return, const std::string& name) +void ThriftHiveMetastoreConcurrentClient::add_primary_key(const AddPrimaryKeyRequest& req) { - int32_t seqid = send_get_type_all(name); - recv_get_type_all(_return, seqid); + int32_t seqid = send_add_primary_key(req); + recv_add_primary_key(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_type_all(const std::string& name) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_primary_key(const AddPrimaryKeyRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_type_all", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_primary_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_type_all_pargs args; - args.name = &name; + ThriftHiveMetastore_add_primary_key_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64805,7 +67619,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_type_all(const std::string return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seqid) { int32_t rseqid = 0; @@ -64834,7 +67648,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::mapreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_type_all") != 0) { + if (fname.compare("add_primary_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64843,23 +67657,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::mapreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o1) { sentry.commit(); - return; + throw result.o1; } if (result.__isset.o2) { sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type_all failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64869,21 +67681,20 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map & _return, const std::string& db_name, const std::string& table_name) +void ThriftHiveMetastoreConcurrentClient::add_foreign_key(const AddForeignKeyRequest& req) { - int32_t seqid = send_get_fields(db_name, table_name); - recv_get_fields(_return, seqid); + int32_t seqid = send_add_foreign_key(req); + recv_add_foreign_key(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields(const std::string& db_name, const std::string& table_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_foreign_key(const AddForeignKeyRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_fields", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_foreign_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_fields_pargs args; - args.db_name = &db_name; - args.table_name = &table_name; + ThriftHiveMetastore_add_foreign_key_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64894,7 +67705,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seqid) { int32_t rseqid = 0; @@ -64923,7 +67734,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_fields") != 0) { + if (fname.compare("add_foreign_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64932,17 +67743,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -64951,12 +67756,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -64966,22 +67767,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::add_unique_constraint(const AddUniqueConstraintRequest& req) { - int32_t seqid = send_get_fields_with_environment_context(db_name, table_name, environment_context); - recv_get_fields_with_environment_context(_return, seqid); + int32_t seqid = send_add_unique_constraint(req); + recv_add_unique_constraint(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_unique_constraint(const AddUniqueConstraintRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_fields_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_unique_constraint", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_fields_with_environment_context_pargs args; - args.db_name 
= &db_name; - args.table_name = &table_name; - args.environment_context = &environment_context; + ThriftHiveMetastore_add_unique_constraint_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64992,7 +67791,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields_with_environment_co return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_context(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32_t seqid) { int32_t rseqid = 0; @@ -65021,7 +67820,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_conte iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_fields_with_environment_context") != 0) { + if (fname.compare("add_unique_constraint") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65030,17 +67829,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_conte using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_fields_with_environment_context_presult result; - result.success = &_return; + ThriftHiveMetastore_add_unique_constraint_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -65049,12 +67842,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_conte sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields_with_environment_context failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65064,21 +67853,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_conte } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_schema(std::vector & _return, const std::string& db_name, const std::string& table_name) +void ThriftHiveMetastoreConcurrentClient::add_not_null_constraint(const AddNotNullConstraintRequest& req) { - int32_t seqid = send_get_schema(db_name, table_name); - recv_get_schema(_return, seqid); + int32_t seqid = send_add_not_null_constraint(req); + recv_add_not_null_constraint(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema(const std::string& db_name, const std::string& table_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_not_null_constraint(const AddNotNullConstraintRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_schema", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_not_null_constraint", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_schema_pargs args; - args.db_name = &db_name; - args.table_name = &table_name; + ThriftHiveMetastore_add_not_null_constraint_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65089,7 +67877,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema(const 
std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_not_null_constraint(const int32_t seqid) { int32_t rseqid = 0; @@ -65118,7 +67906,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_schema") != 0) { + if (fname.compare("add_not_null_constraint") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65127,17 +67915,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -65146,12 +67928,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -65161,22 +67939,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::drop_table(const std::string& dbname, const std::string& name, const bool deleteData) { - int32_t seqid = send_get_schema_with_environment_context(db_name, table_name, environment_context); - recv_get_schema_with_environment_context(_return, seqid); + int32_t seqid = send_drop_table(dbname, name, deleteData); + recv_drop_table(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_schema_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_schema_with_environment_context_pargs args; - args.db_name = &db_name; - args.table_name = &table_name; - args.environment_context = &environment_context; + ThriftHiveMetastore_drop_table_pargs args; + args.dbname = &dbname; + args.name = &name; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65187,7 +67965,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema_with_environment_co return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_context(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_drop_table(const int32_t seqid) { int32_t rseqid = 0; @@ -65216,7 +67994,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_conte iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_schema_with_environment_context") != 0) { + if (fname.compare("drop_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65225,31 +68003,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_conte using ::apache::thrift::protocol::TProtocolException; 
throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_schema_with_environment_context_presult result; - result.success = &_return; + ThriftHiveMetastore_drop_table_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } if (result.__isset.o3) { sentry.commit(); throw result.o3; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema_with_environment_context failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65259,20 +68027,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_conte } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::create_table(const Table& tbl) +void ThriftHiveMetastoreConcurrentClient::drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) { - int32_t seqid = send_create_table(tbl); - recv_create_table(seqid); + int32_t seqid = send_drop_table_with_environment_context(dbname, name, deleteData, environment_context); + recv_drop_table_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_table(const Table& tbl) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_table_pargs args; - args.tbl = &tbl; + ThriftHiveMetastore_drop_table_with_environment_context_pargs args; + args.dbname = &dbname; + args.name = &name; + args.deleteData = &deleteData; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65283,7 +68054,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_table(const Table& tbl) return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_drop_table_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -65312,7 +68083,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_table") != 0) { + if (fname.compare("drop_table_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65321,7 +68092,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_create_table_presult result; + ThriftHiveMetastore_drop_table_with_environment_context_presult result; result.read(iprot_); 
iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65330,18 +68101,10 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid) sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } if (result.__isset.o3) { sentry.commit(); throw result.o3; } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } sentry.commit(); return; } @@ -65353,21 +68116,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames) { - int32_t seqid = send_create_table_with_environment_context(tbl, environment_context); - recv_create_table_with_environment_context(seqid); + int32_t seqid = send_truncate_table(dbName, tableName, partNames); + recv_truncate_table(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("truncate_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_table_with_environment_context_pargs args; - args.tbl = &tbl; - args.environment_context = &environment_context; + ThriftHiveMetastore_truncate_table_pargs args; + args.dbName = &dbName; + args.tableName = &tableName; + args.partNames = &partNames; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65378,7 +68142,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_environment_ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_truncate_table(const int32_t seqid) { int32_t rseqid = 0; @@ -65407,7 +68171,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_con iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_table_with_environment_context") != 0) { + if (fname.compare("truncate_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65416,7 +68180,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_con using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_create_table_with_environment_context_presult result; + ThriftHiveMetastore_truncate_table_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65425,18 +68189,6 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_con sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw 
result.o4;
-    }
     sentry.commit();
     return;
   }
@@ -65448,24 +68200,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_con
 } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints)
+void ThriftHiveMetastoreConcurrentClient::get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern)
 {
-  int32_t seqid = send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints);
-  recv_create_table_with_constraints(seqid);
+  int32_t seqid = send_get_tables(db_name, pattern);
+  recv_get_tables(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables(const std::string& db_name, const std::string& pattern)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("create_table_with_constraints", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_tables", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_create_table_with_constraints_pargs args;
-  args.tbl = &tbl;
-  args.primaryKeys = &primaryKeys;
-  args.foreignKeys = &foreignKeys;
-  args.uniqueConstraints = &uniqueConstraints;
-  args.notNullConstraints = &notNullConstraints;
+  ThriftHiveMetastore_get_tables_pargs args;
+  args.db_name = &db_name;
+  args.pattern = &pattern;
   args.write(oprot_);
   oprot_->writeMessageEnd();

@@ -65476,7 +68225,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_constraints(
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vector<std::string> & _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -65505,7 +68254,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(con
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("create_table_with_constraints") != 0) {
+    if (fname.compare("get_tables") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -65514,29 +68263,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(con
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_create_table_with_constraints_presult result;
+    ThriftHiveMetastore_get_tables_presult result;
+    result.success = &_return;
     result.read(iprot_);
     iprot_->readMessageEnd();
     iprot_->getTransport()->readEnd();

-    if (result.__isset.o1) {
-      sentry.commit();
-      throw result.o1;
-    }
-    if (result.__isset.o2) {
-      sentry.commit();
-      throw result.o2;
-    }
-    if (result.__isset.o3) {
+    if (result.__isset.success) {
+      // _return pointer has now been filled
       sentry.commit();
-      throw result.o3;
+      return;
     }
-    if (result.__isset.o4) {
+    if (result.__isset.o1) {
       sentry.commit();
-      throw result.o4;
+      throw result.o1;
     }
-    sentry.commit();
-    return;
+    // in a bad state, don't commit
+    throw
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_tables failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65546,20 +68289,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(con } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_constraint(const DropConstraintRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_tables_by_type(std::vector & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType) { - int32_t seqid = send_drop_constraint(req); - recv_drop_constraint(seqid); + int32_t seqid = send_get_tables_by_type(db_name, pattern, tableType); + recv_get_tables_by_type(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_constraint(const DropConstraintRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_constraint", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_tables_by_type", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_constraint_pargs args; - args.req = &req; + ThriftHiveMetastore_get_tables_by_type_pargs args; + args.db_name = &db_name; + args.pattern = &pattern; + args.tableType = &tableType; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65570,7 +68315,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_constraint(const DropCons return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -65599,7 +68344,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_constraint") != 0) { + if (fname.compare("get_tables_by_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65608,21 +68353,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_constraint_presult result; + ThriftHiveMetastore_get_tables_by_type_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o3) { + if (result.__isset.o1) { sentry.commit(); - throw result.o3; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_tables_by_type failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65632,20 +68379,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seq } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_primary_key(const AddPrimaryKeyRequest& req) +void 
ThriftHiveMetastoreConcurrentClient::get_table_meta(std::vector & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types) { - int32_t seqid = send_add_primary_key(req); - recv_add_primary_key(seqid); + int32_t seqid = send_get_table_meta(db_patterns, tbl_patterns, tbl_types); + recv_get_table_meta(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_primary_key(const AddPrimaryKeyRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_primary_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_meta", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_primary_key_pargs args; - args.req = &req; + ThriftHiveMetastore_get_table_meta_pargs args; + args.db_patterns = &db_patterns; + args.tbl_patterns = &tbl_patterns; + args.tbl_types = &tbl_types; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65656,7 +68405,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_primary_key(const AddPrima return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_meta(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -65685,7 +68434,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_primary_key") != 0) { + if (fname.compare("get_table_meta") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65694,21 +68443,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_primary_key_presult result; + ThriftHiveMetastore_get_table_meta_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o2) { + if (result.__isset.o1) { sentry.commit(); - throw result.o2; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_meta failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65718,20 +68469,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seq } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_foreign_key(const AddForeignKeyRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_all_tables(std::vector & _return, const std::string& db_name) { - int32_t seqid = send_add_foreign_key(req); - recv_add_foreign_key(seqid); + int32_t seqid = send_get_all_tables(db_name); + recv_get_all_tables(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_foreign_key(const AddForeignKeyRequest& req) +int32_t 
ThriftHiveMetastoreConcurrentClient::send_get_all_tables(const std::string& db_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_foreign_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_tables", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_foreign_key_pargs args; - args.req = &req; + ThriftHiveMetastore_get_all_tables_pargs args; + args.db_name = &db_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65742,7 +68493,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_foreign_key(const AddForei return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -65771,7 +68522,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_foreign_key") != 0) { + if (fname.compare("get_all_tables") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65780,21 +68531,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_foreign_key_presult result; + ThriftHiveMetastore_get_all_tables_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o2) { + if (result.__isset.o1) { sentry.commit(); - throw result.o2; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_tables failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65804,20 +68557,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seq } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_unique_constraint(const AddUniqueConstraintRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_table(Table& _return, const std::string& dbname, const std::string& tbl_name) { - int32_t seqid = send_add_unique_constraint(req); - recv_add_unique_constraint(seqid); + int32_t seqid = send_get_table(dbname, tbl_name); + recv_get_table(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_unique_constraint(const AddUniqueConstraintRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table(const std::string& dbname, const std::string& tbl_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_unique_constraint", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_unique_constraint_pargs args; - args.req = &req; + ThriftHiveMetastore_get_table_pargs args; + args.dbname = &dbname; + args.tbl_name = &tbl_name; args.write(oprot_); oprot_->writeMessageEnd(); 
@@ -65828,7 +68582,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_unique_constraint(const Ad return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -65857,7 +68611,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32 iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_unique_constraint") != 0) { + if (fname.compare("get_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65866,11 +68620,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32 using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_unique_constraint_presult result; + ThriftHiveMetastore_get_table_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -65879,8 +68639,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32 sentry.commit(); throw result.o2; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65890,20 +68650,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32 } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_not_null_constraint(const AddNotNullConstraintRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_table_objects_by_name(std::vector
& _return, const std::string& dbname, const std::vector & tbl_names) { - int32_t seqid = send_add_not_null_constraint(req); - recv_add_not_null_constraint(seqid); + int32_t seqid = send_get_table_objects_by_name(dbname, tbl_names); + recv_get_table_objects_by_name(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_not_null_constraint(const AddNotNullConstraintRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_objects_by_name(const std::string& dbname, const std::vector & tbl_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_not_null_constraint", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_objects_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_not_null_constraint_pargs args; - args.req = &req; + ThriftHiveMetastore_get_table_objects_by_name_pargs args; + args.dbname = &dbname; + args.tbl_names = &tbl_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65914,7 +68675,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_not_null_constraint(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_not_null_constraint(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::vector
& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -65943,7 +68704,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_not_null_constraint(const int iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_not_null_constraint") != 0) { + if (fname.compare("get_table_objects_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65952,21 +68713,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_not_null_constraint(const int using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_not_null_constraint_presult result; + ThriftHiveMetastore_get_table_objects_by_name_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o2; + return; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_objects_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65976,22 +68735,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_not_null_constraint(const int } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_table(const std::string& dbname, const std::string& name, const bool deleteData) +void ThriftHiveMetastoreConcurrentClient::get_table_req(GetTableResult& _return, const GetTableRequest& req) { - int32_t seqid = send_drop_table(dbname, name, deleteData); - recv_drop_table(seqid); + int32_t seqid = send_get_table_req(req); + recv_get_table_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_req(const GetTableRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_table_pargs args; - args.dbname = &dbname; - args.name = &name; - args.deleteData = &deleteData; + ThriftHiveMetastore_get_table_req_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66002,7 +68759,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_table(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_req(GetTableResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -66031,7 +68788,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_table") != 0) { + if (fname.compare("get_table_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66040,21 +68797,27 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table(const int32_t seqid) using 
::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_table_presult result; + ThriftHiveMetastore_get_table_req_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o3) { + if (result.__isset.o2) { sentry.commit(); - throw result.o3; + throw result.o2; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_req failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66064,23 +68827,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req) { - int32_t seqid = send_drop_table_with_environment_context(dbname, name, deleteData, environment_context); - recv_drop_table_with_environment_context(seqid); + int32_t seqid = send_get_table_objects_by_name_req(req); + recv_get_table_objects_by_name_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_objects_by_name_req(const GetTablesRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_objects_by_name_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_table_with_environment_context_pargs args; - args.dbname = &dbname; - args.name = &name; - args.deleteData = &deleteData; - args.environment_context = &environment_context; + ThriftHiveMetastore_get_table_objects_by_name_req_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66091,7 +68851,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table_with_environment_co return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_table_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name_req(GetTablesResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -66120,7 +68880,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table_with_environment_conte iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_table_with_environment_context") != 0) { + if (fname.compare("get_table_objects_by_name_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66129,21 +68889,31 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table_with_environment_conte using ::apache::thrift::protocol::TProtocolException; throw 
TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_table_with_environment_context_presult result; + ThriftHiveMetastore_get_table_objects_by_name_req_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } if (result.__isset.o3) { sentry.commit(); throw result.o3; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_objects_by_name_req failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66153,22 +68923,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table_with_environment_conte } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames) +void ThriftHiveMetastoreConcurrentClient::get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) { - int32_t seqid = send_truncate_table(dbName, tableName, partNames); - recv_truncate_table(seqid); + int32_t seqid = send_get_table_names_by_filter(dbname, filter, max_tables); + recv_get_table_names_by_filter(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("truncate_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_names_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_truncate_table_pargs args; - args.dbName = &dbName; - args.tableName = &tableName; - args.partNames = &partNames; + ThriftHiveMetastore_get_table_names_by_filter_pargs args; + args.dbname = &dbname; + args.filter = &filter; + args.max_tables = &max_tables; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66179,7 +68949,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_truncate_table(const std::stri return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_truncate_table(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -66208,7 +68978,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_truncate_table(const int32_t seqi iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("truncate_table") != 0) { + if (fname.compare("get_table_names_by_filter") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66217,17 +68987,31 @@ void ThriftHiveMetastoreConcurrentClient::recv_truncate_table(const int32_t seqi using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_truncate_table_presult result; + 
ThriftHiveMetastore_get_table_names_by_filter_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - sentry.commit(); - return; + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_names_by_filter failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66237,21 +69021,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_truncate_table(const int32_t seqi } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern) +void ThriftHiveMetastoreConcurrentClient::alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) { - int32_t seqid = send_get_tables(db_name, pattern); - recv_get_tables(_return, seqid); + int32_t seqid = send_alter_table(dbname, tbl_name, new_tbl); + recv_alter_table(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables(const std::string& db_name, const std::string& pattern) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_tables", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_tables_pargs args; - args.db_name = &db_name; - args.pattern = &pattern; + ThriftHiveMetastore_alter_table_pargs args; + args.dbname = &dbname; + args.tbl_name = &tbl_name; + args.new_tbl = &new_tbl; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66262,7 +69047,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) { int32_t rseqid = 0; @@ -66291,7 +69076,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_tables") != 0) { + if (fname.compare("alter_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66300,23 +69085,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_tables failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66326,22 +69109,23 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType) +void ThriftHiveMetastoreConcurrentClient::alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) { - int32_t seqid = send_get_tables_by_type(db_name, pattern, tableType); - recv_get_tables_by_type(_return, seqid); + int32_t seqid = send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context); + recv_alter_table_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_tables_by_type", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_tables_by_type_pargs args; - args.db_name = &db_name; - args.pattern = &pattern; - args.tableType = &tableType; + ThriftHiveMetastore_alter_table_with_environment_context_pargs args; + args.dbname = &dbname; + args.tbl_name = &tbl_name; + args.new_tbl = &new_tbl; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66352,7 +69136,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables_by_type(const std:: return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -66381,7 +69165,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_tables_by_type") != 0) { + if (fname.compare("alter_table_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66390,23 +69174,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_tables_by_type failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66416,22 +69198,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vector & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types) +void ThriftHiveMetastoreConcurrentClient::alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) { - int32_t seqid = 
send_get_table_meta(db_patterns, tbl_patterns, tbl_types); - recv_get_table_meta(_return, seqid); + int32_t seqid = send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade); + recv_alter_table_with_cascade(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_meta", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_table_with_cascade", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_meta_pargs args; - args.db_patterns = &db_patterns; - args.tbl_patterns = &tbl_patterns; - args.tbl_types = &tbl_types; + ThriftHiveMetastore_alter_table_with_cascade_pargs args; + args.dbname = &dbname; + args.tbl_name = &tbl_name; + args.new_tbl = &new_tbl; + args.cascade = &cascade; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66442,7 +69225,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_meta(const std::stri return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_meta(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const int32_t seqid) { int32_t rseqid = 0; @@ -66471,7 +69254,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_meta(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_meta") != 0) { + if (fname.compare("alter_table_with_cascade") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66480,23 +69263,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_meta(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_meta failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66506,20 +69287,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_meta(std::vector & _return, const std::string& db_name) +void ThriftHiveMetastoreConcurrentClient::add_partition(Partition& _return, const Partition& new_part) { - int32_t seqid = send_get_all_tables(db_name); - recv_get_all_tables(_return, seqid); + int32_t seqid = send_add_partition(new_part); + recv_add_partition(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_tables(const std::string& db_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition(const Partition& new_part) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_all_tables", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partition", ::apache::thrift::protocol::T_CALL, cseqid); - 
ThriftHiveMetastore_get_all_tables_pargs args; - args.db_name = &db_name; + ThriftHiveMetastore_add_partition_pargs args; + args.new_part = &new_part; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66530,7 +69311,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_tables(const std::stri return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -66559,7 +69340,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_tables") != 0) { + if (fname.compare("add_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66568,7 +69349,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vectorreadMessageEnd(); @@ -66583,8 +69364,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -66594,21 +69383,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_pargs args; - args.dbname = &dbname; - args.tbl_name = &tbl_name; + ThriftHiveMetastore_add_partition_with_environment_context_pargs args; + args.new_part = &new_part; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66619,7 +69408,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table(const std::string& d return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_context(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -66648,7 +69437,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const i iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table") != 0) { + if (fname.compare("add_partition_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66657,7 +69446,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const i using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_presult result; + ThriftHiveMetastore_add_partition_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -66676,8 +69465,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const i sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, 
"add_partition_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66687,21 +69480,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const i } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_objects_by_name(std::vector
& _return, const std::string& dbname, const std::vector & tbl_names) +int32_t ThriftHiveMetastoreConcurrentClient::add_partitions(const std::vector & new_parts) { - int32_t seqid = send_get_table_objects_by_name(dbname, tbl_names); - recv_get_table_objects_by_name(_return, seqid); + int32_t seqid = send_add_partitions(new_parts); + return recv_add_partitions(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_objects_by_name(const std::string& dbname, const std::vector & tbl_names) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions(const std::vector & new_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_objects_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_objects_by_name_pargs args; - args.dbname = &dbname; - args.tbl_names = &tbl_names; + ThriftHiveMetastore_add_partitions_pargs args; + args.new_parts = &new_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66712,7 +69504,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_objects_by_name(cons return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::vector
& _return, const int32_t seqid) +int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions(const int32_t seqid) { int32_t rseqid = 0; @@ -66741,7 +69533,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::ve iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_objects_by_name") != 0) { + if (fname.compare("add_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66750,19 +69542,31 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::ve using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_objects_by_name_presult result; + int32_t _return; + ThriftHiveMetastore_add_partitions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_objects_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66772,20 +69576,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::ve } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_req(GetTableResult& _return, const GetTableRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::add_partitions_pspec(const std::vector & new_parts) { - int32_t seqid = send_get_table_req(req); - recv_get_table_req(_return, seqid); + int32_t seqid = send_add_partitions_pspec(new_parts); + return recv_add_partitions_pspec(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_req(const GetTableRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_pspec(const std::vector & new_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_req_pargs args; - args.req = &req; + ThriftHiveMetastore_add_partitions_pspec_pargs args; + args.new_parts = &new_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66796,7 +69600,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_req(const GetTableRe return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_req(GetTableResult& _return, const int32_t seqid) +int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int32_t seqid) { int32_t rseqid = 0; @@ -66825,7 +69629,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_req(GetTableResult& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_req") != 0) { + if (fname.compare("add_partitions_pspec") != 0) 
{ iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66834,16 +69638,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_req(GetTableResult& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_req_presult result; + int32_t _return; + ThriftHiveMetastore_add_partitions_pspec_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -66853,8 +69657,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_req(GetTableResult& _re sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_req failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions_pspec failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66864,20 +69672,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_req(GetTableResult& _re } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req) +void ThriftHiveMetastoreConcurrentClient::append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) { - int32_t seqid = send_get_table_objects_by_name_req(req); - recv_get_table_objects_by_name_req(_return, seqid); + int32_t seqid = send_append_partition(db_name, tbl_name, part_vals); + recv_append_partition(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_objects_by_name_req(const GetTablesRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_objects_by_name_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_objects_by_name_req_pargs args; - args.req = &req; + ThriftHiveMetastore_append_partition_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66888,7 +69698,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_objects_by_name_req( return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name_req(GetTablesResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -66917,7 +69727,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name_req(Get iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_objects_by_name_req") != 0) { + if (fname.compare("append_partition") != 0) { 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66926,7 +69736,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name_req(Get using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_objects_by_name_req_presult result; + ThriftHiveMetastore_append_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -66950,7 +69760,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name_req(Get throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_objects_by_name_req failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66960,22 +69770,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name_req(Get } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) +void ThriftHiveMetastoreConcurrentClient::add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request) { - int32_t seqid = send_get_table_names_by_filter(dbname, filter, max_tables); - recv_get_table_names_by_filter(_return, seqid); + int32_t seqid = send_add_partitions_req(request); + recv_add_partitions_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_req(const AddPartitionsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_names_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_names_by_filter_pargs args; - args.dbname = &dbname; - args.filter = &filter; - args.max_tables = &max_tables; + ThriftHiveMetastore_add_partitions_req_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66986,7 +69794,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_names_by_filter(cons return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67015,7 +69823,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::ve iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_names_by_filter") != 0) { + if (fname.compare("add_partitions_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67024,7 +69832,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::ve using ::apache::thrift::protocol::TProtocolException; throw 
TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_names_by_filter_presult result; + ThriftHiveMetastore_add_partitions_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67048,7 +69856,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::ve throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_names_by_filter failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions_req failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67058,22 +69866,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::ve } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) +void ThriftHiveMetastoreConcurrentClient::append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context) { - int32_t seqid = send_alter_table(dbname, tbl_name, new_tbl); - recv_alter_table(seqid); + int32_t seqid = send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context); + recv_append_partition_with_environment_context(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) +int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_table_pargs args; - args.dbname = &dbname; + ThriftHiveMetastore_append_partition_with_environment_context_pargs args; + args.db_name = &db_name; args.tbl_name = &tbl_name; - args.new_tbl = &new_tbl; + args.part_vals = &part_vals; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67084,7 +69893,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment_context(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67113,7 +69922,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_table") != 0) { + if (fname.compare("append_partition_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67122,11 +69931,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw 
TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_table_presult result; + ThriftHiveMetastore_append_partition_with_environment_context_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -67135,8 +69950,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) sentry.commit(); throw result.o2; } - sentry.commit(); - return; + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67146,23 +69965,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::append_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) { - int32_t seqid = send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context); - recv_alter_table_with_environment_context(seqid); + int32_t seqid = send_append_partition_by_name(db_name, tbl_name, part_name); + recv_append_partition_by_name(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_table_with_environment_context_pargs args; - args.dbname = &dbname; + ThriftHiveMetastore_append_partition_by_name_pargs args; + args.db_name = &db_name; args.tbl_name = &tbl_name; - args.new_tbl = &new_tbl; - args.environment_context = &environment_context; + args.part_name = &part_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67173,7 +69991,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_environment_c return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67202,7 +70020,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_cont iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_table_with_environment_context") != 0) { + if (fname.compare("append_partition_by_name") != 0) { 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67211,11 +70029,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_cont using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_table_with_environment_context_presult result; + ThriftHiveMetastore_append_partition_by_name_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -67224,8 +70048,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_cont sentry.commit(); throw result.o2; } - sentry.commit(); - return; + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67235,23 +70063,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_cont } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) +void ThriftHiveMetastoreConcurrentClient::append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) { - int32_t seqid = send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade); - recv_alter_table_with_cascade(seqid); + int32_t seqid = send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context); + recv_append_partition_by_name_with_environment_context(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) +int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_table_with_cascade", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_table_with_cascade_pargs args; - args.dbname = &dbname; + ThriftHiveMetastore_append_partition_by_name_with_environment_context_pargs args; + args.db_name = &db_name; args.tbl_name = &tbl_name; - args.new_tbl = &new_tbl; - args.cascade = &cascade; + args.part_name = &part_name; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67262,7 +70090,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_cascade(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const int32_t seqid) +void 
ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_environment_context(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67291,7 +70119,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const in iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_table_with_cascade") != 0) { + if (fname.compare("append_partition_by_name_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67300,11 +70128,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const in using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_table_with_cascade_presult result; + ThriftHiveMetastore_append_partition_by_name_with_environment_context_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -67313,8 +70147,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const in sentry.commit(); throw result.o2; } - sentry.commit(); - return; + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67324,20 +70162,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const in } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_partition(Partition& _return, const Partition& new_part) +bool ThriftHiveMetastoreConcurrentClient::drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) { - int32_t seqid = send_add_partition(new_part); - recv_add_partition(_return, seqid); + int32_t seqid = send_drop_partition(db_name, tbl_name, part_vals, deleteData); + return recv_drop_partition(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition(const Partition& new_part) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partition_pargs args; - args.new_part = &new_part; + ThriftHiveMetastore_drop_partition_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67348,7 +70189,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition(const Partition& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqid) { int32_t rseqid = 0; @@ 
-67377,7 +70218,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partition") != 0) { + if (fname.compare("drop_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67386,16 +70227,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_partition_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -67405,12 +70246,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partition failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67420,20 +70257,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context) +bool ThriftHiveMetastoreConcurrentClient::drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context) { - int32_t seqid = send_add_partition_with_environment_context(new_part, environment_context); - recv_add_partition_with_environment_context(_return, seqid); + int32_t seqid = send_drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, environment_context); + return recv_drop_partition_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition_with_environment_context(const Partition& new_part, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partition_with_environment_context_pargs args; - args.new_part = &new_part; + ThriftHiveMetastore_drop_partition_with_environment_context_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.deleteData = &deleteData; 
args.environment_context = &environment_context; args.write(oprot_); @@ -67445,7 +70285,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition_with_environment return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_context(Partition& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -67474,7 +70314,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_co iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partition_with_environment_context") != 0) { + if (fname.compare("drop_partition_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67483,16 +70323,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_co using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_partition_with_environment_context_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -67502,12 +70342,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_co sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partition_with_environment_context failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67517,20 +70353,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_co } // end while(true) } -int32_t ThriftHiveMetastoreConcurrentClient::add_partitions(const std::vector & new_parts) +bool ThriftHiveMetastoreConcurrentClient::drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) { - int32_t seqid = send_add_partitions(new_parts); - return recv_add_partitions(seqid); + int32_t seqid = send_drop_partition_by_name(db_name, tbl_name, part_name, deleteData); + return recv_drop_partition_by_name(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions(const std::vector & new_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partitions_pargs args; - args.new_parts = &new_parts; + ThriftHiveMetastore_drop_partition_by_name_pargs args; + args.db_name 
= &db_name; + args.tbl_name = &tbl_name; + args.part_name = &part_name; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67541,7 +70380,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions(const std::vect return cseqid; } -int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int32_t seqid) { int32_t rseqid = 0; @@ -67570,7 +70409,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partitions") != 0) { + if (fname.compare("drop_partition_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67579,8 +70418,8 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions(const int32_t s using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - int32_t _return; - ThriftHiveMetastore_add_partitions_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67598,12 +70437,8 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions(const int32_t s sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67613,20 +70448,24 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions(const int32_t s } // end while(true) } -int32_t ThriftHiveMetastoreConcurrentClient::add_partitions_pspec(const std::vector & new_parts) +bool ThriftHiveMetastoreConcurrentClient::drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) { - int32_t seqid = send_add_partitions_pspec(new_parts); - return recv_add_partitions_pspec(seqid); + int32_t seqid = send_drop_partition_by_name_with_environment_context(db_name, tbl_name, part_name, deleteData, environment_context); + return recv_drop_partition_by_name_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_pspec(const std::vector & new_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partitions_pspec_pargs args; - args.new_parts = &new_parts; + 
ThriftHiveMetastore_drop_partition_by_name_with_environment_context_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_name = &part_name; + args.deleteData = &deleteData; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67637,7 +70476,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_pspec(const std return cseqid; } -int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -67666,7 +70505,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partitions_pspec") != 0) { + if (fname.compare("drop_partition_by_name_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67675,8 +70514,8 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - int32_t _return; - ThriftHiveMetastore_add_partitions_pspec_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_by_name_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67694,12 +70533,8 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions_pspec failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_by_name_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67709,22 +70544,20 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) +void ThriftHiveMetastoreConcurrentClient::drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req) { - int32_t seqid = send_append_partition(db_name, tbl_name, part_vals); - recv_append_partition(_return, seqid); + int32_t seqid = send_drop_partitions_req(req); + recv_drop_partitions_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partitions_req(const DropPartitionsRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("append_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; + 
ThriftHiveMetastore_drop_partitions_req_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67735,7 +70568,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition(const std::st return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartitionsResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67764,7 +70597,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _retu iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition") != 0) { + if (fname.compare("drop_partitions_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67773,7 +70606,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _retu using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_append_partition_presult result; + ThriftHiveMetastore_drop_partitions_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67792,12 +70625,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _retu sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partitions_req failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67807,20 +70636,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _retu } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) { - int32_t seqid = send_add_partitions_req(request); - recv_add_partitions_req(_return, seqid); + int32_t seqid = send_get_partition(db_name, tbl_name, part_vals); + recv_get_partition(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_req(const AddPartitionsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partitions_req_pargs args; - args.request = &request; + ThriftHiveMetastore_get_partition_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67831,7 +70662,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_req(const AddPa return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsResult& 
_return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67860,7 +70691,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsR iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partitions_req") != 0) { + if (fname.compare("get_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67869,7 +70700,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsR using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_partitions_req_presult result; + ThriftHiveMetastore_get_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67888,12 +70719,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsR sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions_req failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67903,23 +70730,24 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsR } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::exchange_partition(Partition& _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) { - int32_t seqid = send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context); - recv_append_partition_with_environment_context(_return, seqid); + int32_t seqid = send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); + recv_exchange_partition(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partition(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("append_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("exchange_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_with_environment_context_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.environment_context = &environment_context; + 
ThriftHiveMetastore_exchange_partition_pargs args; + args.partitionSpecs = &partitionSpecs; + args.source_db = &source_db; + args.source_table_name = &source_table_name; + args.dest_db = &dest_db; + args.dest_table_name = &dest_table_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67930,7 +70758,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_with_environm return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment_context(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67959,7 +70787,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition_with_environment_context") != 0) { + if (fname.compare("exchange_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67968,7 +70796,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_append_partition_with_environment_context_presult result; + ThriftHiveMetastore_exchange_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67991,8 +70819,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment sentry.commit(); throw result.o3; } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_with_environment_context failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "exchange_partition failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68002,22 +70834,24 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::append_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) +void ThriftHiveMetastoreConcurrentClient::exchange_partitions(std::vector & _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) { - int32_t seqid = send_append_partition_by_name(db_name, tbl_name, part_name); - recv_append_partition_by_name(_return, seqid); + int32_t seqid = send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); + recv_exchange_partitions(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partitions(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - 
oprot_->writeMessageBegin("append_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("exchange_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_by_name_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_name = &part_name; + ThriftHiveMetastore_exchange_partitions_pargs args; + args.partitionSpecs = &partitionSpecs; + args.source_db = &source_db; + args.source_table_name = &source_table_name; + args.dest_db = &dest_db; + args.dest_table_name = &dest_table_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68028,7 +70862,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68057,7 +70891,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partitio iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition_by_name") != 0) { + if (fname.compare("exchange_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68066,7 +70900,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partitio using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_append_partition_by_name_presult result; + ThriftHiveMetastore_exchange_partitions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68089,8 +70923,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partitio sentry.commit(); throw result.o3; } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "exchange_partitions failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68100,23 +70938,24 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partitio } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names) { - int32_t seqid = send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context); - recv_append_partition_by_name_with_environment_context(_return, seqid); + int32_t seqid = send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names); + recv_get_partition_with_auth(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, 
const std::string& part_name, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("append_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_by_name_with_environment_context_pargs args; + ThriftHiveMetastore_get_partition_with_auth_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.environment_context = &environment_context; + args.part_vals = &part_vals; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68127,7 +70966,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name_with_ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_environment_context(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68156,7 +70995,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_env iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition_by_name_with_environment_context") != 0) { + if (fname.compare("get_partition_with_auth") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68165,7 +71004,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_env using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_append_partition_by_name_with_environment_context_presult result; + ThriftHiveMetastore_get_partition_with_auth_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68184,12 +71023,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_env sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name_with_environment_context failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_with_auth failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68199,23 +71034,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_env } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) +void ThriftHiveMetastoreConcurrentClient::get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) { - int32_t seqid = send_drop_partition(db_name, tbl_name, part_vals, deleteData); - return 
recv_drop_partition(seqid); + int32_t seqid = send_get_partition_by_name(db_name, tbl_name, part_name); + recv_get_partition_by_name(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_pargs args; + ThriftHiveMetastore_get_partition_by_name_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.deleteData = &deleteData; + args.part_name = &part_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68226,7 +71060,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition(const std::stri return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68255,7 +71089,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqi iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partition") != 0) { + if (fname.compare("get_partition_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68264,16 +71098,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqi using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_partition_presult result; + ThriftHiveMetastore_get_partition_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -68284,7 +71118,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqi throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68294,24 +71128,22 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqi } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::get_partitions(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) { - int32_t seqid = 
send_drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, environment_context); - return recv_drop_partition_with_environment_context(seqid); + int32_t seqid = send_get_partitions(db_name, tbl_name, max_parts); + recv_get_partitions(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_with_environment_context_pargs args; + ThriftHiveMetastore_get_partitions_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.deleteData = &deleteData; - args.environment_context = &environment_context; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68322,7 +71154,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_with_environmen return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68351,7 +71183,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_c iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partition_with_environment_context") != 0) { + if (fname.compare("get_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68360,16 +71192,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_c using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_partition_with_environment_context_presult result; + ThriftHiveMetastore_get_partitions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -68380,7 +71212,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_c throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_with_environment_context failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68390,23 +71222,24 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_c } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_partition_by_name(const std::string& db_name, const std::string& 
tbl_name, const std::string& part_name, const bool deleteData) +void ThriftHiveMetastoreConcurrentClient::get_partitions_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) { - int32_t seqid = send_drop_partition_by_name(db_name, tbl_name, part_name, deleteData); - return recv_drop_partition_by_name(seqid); + int32_t seqid = send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names); + recv_get_partitions_with_auth(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_with_auth(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_by_name_pargs args; + ThriftHiveMetastore_get_partitions_with_auth_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.deleteData = &deleteData; + args.max_parts = &max_parts; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68417,7 +71250,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name(const s return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68446,7 +71279,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int3 iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partition_by_name") != 0) { + if (fname.compare("get_partitions_with_auth") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68455,16 +71288,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int3 using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_partition_by_name_presult result; + ThriftHiveMetastore_get_partitions_with_auth_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -68475,7 +71308,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int3 throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_with_auth failed: unknown result"); } // seqid != rseqid 
this->sync_.updatePending(fname, mtype, rseqid); @@ -68485,24 +71318,22 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int3 } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::get_partitions_pspec(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) { - int32_t seqid = send_drop_partition_by_name_with_environment_context(db_name, tbl_name, part_name, deleteData, environment_context); - return recv_drop_partition_by_name_with_environment_context(seqid); + int32_t seqid = send_get_partitions_pspec(db_name, tbl_name, max_parts); + recv_get_partitions_pspec(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_pspec(const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_by_name_with_environment_context_pargs args; + ThriftHiveMetastore_get_partitions_pspec_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.deleteData = &deleteData; - args.environment_context = &environment_context; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68513,7 +71344,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name_with_en return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68542,7 +71373,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name_with_envir iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partition_by_name_with_environment_context") != 0) { + if (fname.compare("get_partitions_pspec") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68551,16 +71382,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name_with_envir using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_partition_by_name_with_environment_context_presult result; + ThriftHiveMetastore_get_partitions_pspec_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -68571,7 +71402,7 @@ bool 
ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name_with_envir throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_by_name_with_environment_context failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_pspec failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68581,20 +71412,22 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name_with_envir } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_partition_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) { - int32_t seqid = send_drop_partitions_req(req); - recv_drop_partitions_req(_return, seqid); + int32_t seqid = send_get_partition_names(db_name, tbl_name, max_parts); + recv_get_partition_names(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partitions_req(const DropPartitionsRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_names", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partitions_req_pargs args; - args.req = &req; + ThriftHiveMetastore_get_partition_names_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68605,7 +71438,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partitions_req(const Drop return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartitionsResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68634,7 +71467,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartition iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partitions_req") != 0) { + if (fname.compare("get_partition_names") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68643,7 +71476,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartition using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_partitions_req_presult result; + ThriftHiveMetastore_get_partition_names_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68663,7 +71496,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartition throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partitions_req failed: unknown result"); + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68673,22 +71506,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartition } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) +void ThriftHiveMetastoreConcurrentClient::get_partition_values(PartitionValuesResponse& _return, const PartitionValuesRequest& request) { - int32_t seqid = send_get_partition(db_name, tbl_name, part_vals); - recv_get_partition(_return, seqid); + int32_t seqid = send_get_partition_values(request); + recv_get_partition_values(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_values(const PartitionValuesRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_values", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; + ThriftHiveMetastore_get_partition_values_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68699,7 +71530,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition(const std::strin return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition_values(PartitionValuesResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68728,7 +71559,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return, iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition") != 0) { + if (fname.compare("get_partition_values") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68737,7 +71568,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return, using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partition_presult result; + ThriftHiveMetastore_get_partition_values_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68757,7 +71588,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return, throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_values failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68767,24 +71598,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return, } // end while(true) } -void 
ThriftHiveMetastoreConcurrentClient::exchange_partition(Partition& _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) +void ThriftHiveMetastoreConcurrentClient::get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) { - int32_t seqid = send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); - recv_exchange_partition(_return, seqid); + int32_t seqid = send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts); + recv_get_partitions_ps(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partition(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("exchange_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_ps", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_exchange_partition_pargs args; - args.partitionSpecs = &partitionSpecs; - args.source_db = &source_db; - args.source_table_name = &source_table_name; - args.dest_db = &dest_db; - args.dest_table_name = &dest_table_name; + ThriftHiveMetastore_get_partitions_ps_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68795,7 +71625,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partition(const std:: return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68824,7 +71654,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("exchange_partition") != 0) { + if (fname.compare("get_partitions_ps") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68833,7 +71663,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_exchange_partition_presult result; + ThriftHiveMetastore_get_partitions_ps_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68852,16 +71682,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _re sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "exchange_partition failed: unknown result"); + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_ps failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68871,24 +71693,25 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _re } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::exchange_partitions(std::vector & _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) +void ThriftHiveMetastoreConcurrentClient::get_partitions_ps_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) { - int32_t seqid = send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); - recv_exchange_partitions(_return, seqid); + int32_t seqid = send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names); + recv_get_partitions_ps_with_auth(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partitions(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("exchange_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_ps_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_exchange_partitions_pargs args; - args.partitionSpecs = &partitionSpecs; - args.source_db = &source_db; - args.source_table_name = &source_table_name; - args.dest_db = &dest_db; - args.dest_table_name = &dest_table_name; + ThriftHiveMetastore_get_partitions_ps_with_auth_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.max_parts = &max_parts; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68899,7 +71722,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partitions(const std: return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68928,7 +71751,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector
iprot_->
readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("exchange_partitions") != 0) { + if (fname.compare("get_partitions_ps_with_auth") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68937,7 +71760,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector
using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_exchange_partitions_presult result; + ThriftHiveMetastore_get_partitions_ps_with_auth_presult result; result.success = &_return; result.read(iprot_); iprot_->
readMessageEnd(); @@ -68956,16 +71779,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector
sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "exchange_partitions failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_ps_with_auth failed: unknown result"); } // seqid != rseqid this->
sync_.updatePending(fname, mtype, rseqid); @@ -68975,24 +71790,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector
} // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string>
& part_vals, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreConcurrentClient::get_partition_names_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) { - int32_t seqid = send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names); - recv_get_partition_with_auth(_return, seqid); + int32_t seqid = send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts); + recv_get_partition_names_ps(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partition_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_names_ps", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_with_auth_pargs args; + ThriftHiveMetastore_get_partition_names_ps_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; args.part_vals = &part_vals; - args.user_name = &user_name; - args.group_names = &group_names; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69003,7 +71817,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_with_auth(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -69032,7 +71846,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_with_auth") != 0) { + if (fname.compare("get_partition_names_ps") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69041,7 +71855,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partition_with_auth_presult result; + ThriftHiveMetastore_get_partition_names_ps_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -69061,7 +71875,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_with_auth failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names_ps failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69071,22 +71885,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition } // end while(true) } -void 
ThriftHiveMetastoreConcurrentClient::get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) +void ThriftHiveMetastoreConcurrentClient::get_partitions_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) { - int32_t seqid = send_get_partition_by_name(db_name, tbl_name, part_name); - recv_get_partition_by_name(_return, seqid); + int32_t seqid = send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts); + recv_get_partitions_by_filter(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_by_name_pargs args; + ThriftHiveMetastore_get_partitions_by_filter_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_name = &part_name; + args.filter = &filter; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69097,7 +71912,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_by_name(const st return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -69126,7 +71941,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition& iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_by_name") != 0) { + if (fname.compare("get_partitions_by_filter") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69135,7 +71950,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition& using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partition_by_name_presult result; + ThriftHiveMetastore_get_partitions_by_filter_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -69155,7 +71970,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition& throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_filter failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69165,21 +71980,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition& } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partitions(std::vector & _return, const std::string& db_name, const 
std::string& tbl_name, const int16_t max_parts) +void ThriftHiveMetastoreConcurrentClient::get_part_specs_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts) { - int32_t seqid = send_get_partitions(db_name, tbl_name, max_parts); - recv_get_partitions(_return, seqid); + int32_t seqid = send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts); + recv_get_part_specs_by_filter(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_part_specs_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_part_specs_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_pargs args; + ThriftHiveMetastore_get_part_specs_by_filter_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; + args.filter = &filter; args.max_parts = &max_parts; args.write(oprot_); @@ -69191,7 +72007,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions(const std::stri return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -69220,7 +72036,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions") != 0) { + if (fname.compare("get_part_specs_by_filter") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69229,7 +72045,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vectorreadMessageEnd(); @@ -69249,7 +72065,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -69259,24 +72075,114 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreConcurrentClient::get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req) { - int32_t seqid = send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names); - recv_get_partitions_with_auth(_return, seqid); + int32_t seqid = send_get_partitions_by_expr(req); + recv_get_partitions_by_expr(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_with_auth(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_expr(const PartitionsByExprRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_with_auth", ::apache::thrift::protocol::T_CALL, 
cseqid); + oprot_->writeMessageBegin("get_partitions_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_with_auth_pargs args; + ThriftHiveMetastore_get_partitions_by_expr_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(PartitionsByExprResult& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_partitions_by_expr") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_partitions_by_expr_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_expr failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +int32_t ThriftHiveMetastoreConcurrentClient::get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter) +{ + int32_t seqid = send_get_num_partitions_by_filter(db_name, tbl_name, filter); + return recv_get_num_partitions_by_filter(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_num_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_num_partitions_by_filter_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.max_parts = &max_parts; - args.user_name = &user_name; - args.group_names = &group_names; + args.filter = &filter; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69287,7 
+72193,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_with_auth(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vector & _return, const int32_t seqid) +int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(const int32_t seqid) { int32_t rseqid = 0; @@ -69316,7 +72222,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vec iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_with_auth") != 0) { + if (fname.compare("get_num_partitions_by_filter") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69325,16 +72231,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vec using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partitions_with_auth_presult result; + int32_t _return; + ThriftHiveMetastore_get_num_partitions_by_filter_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -69345,7 +72251,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vec throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_with_auth failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_num_partitions_by_filter failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69355,22 +72261,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vec } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partitions_pspec(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) +void ThriftHiveMetastoreConcurrentClient::get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names) { - int32_t seqid = send_get_partitions_pspec(db_name, tbl_name, max_parts); - recv_get_partitions_pspec(_return, seqid); + int32_t seqid = send_get_partitions_by_names(db_name, tbl_name, names); + recv_get_partitions_by_names(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_pspec(const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector & names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_by_names", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_pspec_pargs args; + ThriftHiveMetastore_get_partitions_by_names_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.max_parts = &max_parts; + args.names = &names; args.write(oprot_); oprot_->writeMessageEnd(); @@ 
-69381,7 +72287,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_pspec(const std return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -69410,7 +72316,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector< iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_pspec") != 0) { + if (fname.compare("get_partitions_by_names") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69419,7 +72325,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector< using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partitions_pspec_presult result; + ThriftHiveMetastore_get_partitions_by_names_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -69439,7 +72345,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector< throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_pspec failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_names failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69449,114 +72355,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector< } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partition_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) +void ThriftHiveMetastoreConcurrentClient::alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) { - int32_t seqid = send_get_partition_names(db_name, tbl_name, max_parts); - recv_get_partition_names(_return, seqid); + int32_t seqid = send_alter_partition(db_name, tbl_name, new_part); + recv_alter_partition(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partition_names", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_names_pargs args; + ThriftHiveMetastore_alter_partition_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.max_parts = &max_parts; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names(std::vector & _return, const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // 
the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_partition_names") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); - } - ThriftHiveMetastore_get_partition_names_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names failed: unknown result"); - } - // seqid != rseqid - this->sync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -void ThriftHiveMetastoreConcurrentClient::get_partition_values(PartitionValuesResponse& _return, const PartitionValuesRequest& request) -{ - int32_t seqid = send_get_partition_values(request); - recv_get_partition_values(_return, seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_values(const PartitionValuesRequest& request) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partition_values", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_get_partition_values_pargs args; - args.request = &request; + args.new_part = &new_part; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69567,7 +72381,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_values(const Par return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partition_values(PartitionValuesResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seqid) { int32_t rseqid = 0; @@ -69596,7 +72410,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_values(PartitionVal iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_values") != 0) { + if (fname.compare("alter_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69605,17 +72419,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_values(PartitionVal using ::apache::thrift::protocol::TProtocolException; throw 
TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partition_values_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_partition_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -69624,8 +72432,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_values(PartitionVal sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_values failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69635,23 +72443,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_values(PartitionVal } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) +void ThriftHiveMetastoreConcurrentClient::alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) { - int32_t seqid = send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts); - recv_get_partitions_ps(_return, seqid); + int32_t seqid = send_alter_partitions(db_name, tbl_name, new_parts); + recv_alter_partitions(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_ps", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_ps_pargs args; + ThriftHiveMetastore_alter_partitions_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.max_parts = &max_parts; + args.new_parts = &new_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69662,7 +72469,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps(const std::s return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t seqid) { int32_t rseqid = 0; @@ -69691,7 +72498,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_ps") != 0) { + if (fname.compare("alter_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69700,17 +72507,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -69719,8 
+72520,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -69730,25 +72531,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreConcurrentClient::alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) { - int32_t seqid = send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names); - recv_get_partitions_ps_with_auth(_return, seqid); + int32_t seqid = send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); + recv_alter_partitions_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_ps_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_ps_with_auth_pargs args; + ThriftHiveMetastore_alter_partitions_with_environment_context_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.max_parts = &max_parts; - args.user_name = &user_name; - args.group_names = &group_names; + args.new_parts = &new_parts; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69759,7 +72558,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps_with_auth(co return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -69788,7 +72587,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std:: iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_ps_with_auth") != 0) { + if (fname.compare("alter_partitions_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69797,17 +72596,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std:: using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partitions_ps_with_auth_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_partitions_with_environment_context_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) 
{ - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -69816,8 +72609,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std:: sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_ps_with_auth failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69827,23 +72620,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std:: } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partition_names_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) +void ThriftHiveMetastoreConcurrentClient::alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) { - int32_t seqid = send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts); - recv_get_partition_names_ps(_return, seqid); + int32_t seqid = send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context); + recv_alter_partition_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partition_names_ps", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_names_ps_pargs args; + ThriftHiveMetastore_alter_partition_with_environment_context_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.max_parts = &max_parts; + args.new_part = &new_part; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69854,7 +72647,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names_ps(const s return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -69883,7 +72676,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vecto iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_names_ps") != 0) { + if (fname.compare("alter_partition_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69892,17 +72685,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vecto using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - 
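// Illustrative sketch (invented types, not generated Thrift output): in the
// hunks above, the void methods on the plus side (alter_partition*,
// alter_partitions*, rename_partition) drop the __isset.success check and the
// MISSING_RESULT throw that the value-returning minus-side methods carried.
// For a void Thrift call an empty result struct already means success, so
// recv_* just commits and returns after checking the declared exceptions,
// whereas a value-returning recv_* must see success or report MISSING_RESULT.
#include <optional>
#include <stdexcept>
#include <string>

struct VoidResult  { std::optional<std::string> o1; };  // exception slots only
struct ValueResult { std::optional<int> success; std::optional<std::string> o1; };

// void-style handling: nothing set at all still counts as success.
void handleVoid(const VoidResult& r) {
  if (r.o1) throw std::runtime_error(*r.o1);  // declared exception
}

// value-style handling: a missing success field is a protocol-level error.
int handleValue(const ValueResult& r) {
  if (r.success) return *r.success;
  if (r.o1) throw std::runtime_error(*r.o1);
  throw std::runtime_error("missing result");  // analogue of MISSING_RESULT
}

int main() {
  VoidResult done;            // empty: success for a void call
  handleVoid(done);
  ValueResult ok;
  ok.success = 42;
  return handleValue(ok) == 42 ? 0 : 1;
}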
ThriftHiveMetastore_get_partition_names_ps_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_partition_with_environment_context_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -69911,8 +72698,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vecto sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names_ps failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69922,23 +72709,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vecto } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partitions_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) +void ThriftHiveMetastoreConcurrentClient::rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) { - int32_t seqid = send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts); - recv_get_partitions_by_filter(_return, seqid); + int32_t seqid = send_rename_partition(db_name, tbl_name, part_vals, new_part); + recv_rename_partition(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("rename_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_by_filter_pargs args; + ThriftHiveMetastore_rename_partition_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.filter = &filter; - args.max_parts = &max_parts; + args.part_vals = &part_vals; + args.new_part = &new_part; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69949,7 +72736,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_filter(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t seqid) { int32_t rseqid = 0; @@ -69978,7 +72765,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vec iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_by_filter") != 0) { + if (fname.compare("rename_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69987,17 +72774,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vec using ::apache::thrift::protocol::TProtocolException; throw 
TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partitions_by_filter_presult result; - result.success = &_return; + ThriftHiveMetastore_rename_partition_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -70006,8 +72787,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vec sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_filter failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70017,23 +72798,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vec } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_part_specs_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts) +bool ThriftHiveMetastoreConcurrentClient::partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception) { - int32_t seqid = send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts); - recv_get_part_specs_by_filter(_return, seqid); + int32_t seqid = send_partition_name_has_valid_characters(part_vals, throw_exception); + return recv_partition_name_has_valid_characters(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_part_specs_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_part_specs_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("partition_name_has_valid_characters", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_part_specs_by_filter_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.filter = &filter; - args.max_parts = &max_parts; + ThriftHiveMetastore_partition_name_has_valid_characters_pargs args; + args.part_vals = &part_vals; + args.throw_exception = &throw_exception; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70044,7 +72823,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_part_specs_by_filter(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vector & _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characters(const int32_t seqid) { int32_t rseqid = 0; @@ -70073,7 +72852,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vec iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_part_specs_by_filter") != 0) { + if (fname.compare("partition_name_has_valid_characters") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70082,27 +72861,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vec using 
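// Illustrative sketch (hypothetical struct, not generated Thrift output):
// every send_* body above fills a *_pargs struct whose members are pointers
// to the caller's arguments (args.part_vals = &part_vals, and so on), so no
// argument is copied before serialization; the arguments only have to stay
// alive until args.write(oprot_) returns. PartitionNameArgs below imitates
// that borrow-then-serialize shape with invented names.
#include <iostream>
#include <string>
#include <vector>

struct PartitionNameArgs {
  const std::vector<std::string>* part_vals = nullptr;  // borrowed, not owned
  const bool* throw_exception = nullptr;

  // Stand-in for pargs::write(): serialize through the borrowed pointers.
  void write(std::ostream& out) const {
    for (const auto& v : *part_vals) out << v << '/';
    out << (*throw_exception ? "strict" : "lenient") << '\n';
  }
};

int main() {
  std::vector<std::string> vals{"ds=2017-10-01", "hr=12"};
  bool strict = true;
  PartitionNameArgs args;
  args.part_vals = &vals;       // same pattern as args.part_vals = &part_vals
  args.throw_exception = &strict;
  args.write(std::cout);        // vals and strict are still in scope here
  return 0;
}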
::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_part_specs_by_filter_presult result; + bool _return; + ThriftHiveMetastore_partition_name_has_valid_characters_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_part_specs_by_filter failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_has_valid_characters failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70112,20 +72887,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vec } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue) { - int32_t seqid = send_get_partitions_by_expr(req); - recv_get_partitions_by_expr(_return, seqid); + int32_t seqid = send_get_config_value(name, defaultValue); + recv_get_config_value(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_expr(const PartitionsByExprRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_config_value(const std::string& name, const std::string& defaultValue) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_config_value", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_by_expr_pargs args; - args.req = &req; + ThriftHiveMetastore_get_config_value_pargs args; + args.name = &name; + args.defaultValue = &defaultValue; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70136,7 +72912,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_expr(const P return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(PartitionsByExprResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -70165,7 +72941,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(Partitions iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_by_expr") != 0) { + if (fname.compare("get_config_value") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70174,7 +72950,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(Partitions using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partitions_by_expr_presult result; + ThriftHiveMetastore_get_config_value_presult result; result.success = 
&_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -70189,12 +72965,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(Partitions sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_expr failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_config_value failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70204,22 +72976,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(Partitions } // end while(true) } -int32_t ThriftHiveMetastoreConcurrentClient::get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter) +void ThriftHiveMetastoreConcurrentClient::partition_name_to_vals(std::vector & _return, const std::string& part_name) { - int32_t seqid = send_get_num_partitions_by_filter(db_name, tbl_name, filter); - return recv_get_num_partitions_by_filter(seqid); + int32_t seqid = send_partition_name_to_vals(part_name); + recv_partition_name_to_vals(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter) +int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_vals(const std::string& part_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_num_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("partition_name_to_vals", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_num_partitions_by_filter_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.filter = &filter; + ThriftHiveMetastore_partition_name_to_vals_pargs args; + args.part_name = &part_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70230,7 +73000,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_num_partitions_by_filter(c return cseqid; } -int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -70259,7 +73029,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(c iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_num_partitions_by_filter") != 0) { + if (fname.compare("partition_name_to_vals") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70268,27 +73038,23 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(c using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - int32_t _return; - ThriftHiveMetastore_get_num_partitions_by_filter_presult result; + ThriftHiveMetastore_partition_name_to_vals_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if 
(result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_num_partitions_by_filter failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_to_vals failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70298,22 +73064,20 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(c } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names) +void ThriftHiveMetastoreConcurrentClient::partition_name_to_spec(std::map & _return, const std::string& part_name) { - int32_t seqid = send_get_partitions_by_names(db_name, tbl_name, names); - recv_get_partitions_by_names(_return, seqid); + int32_t seqid = send_partition_name_to_spec(part_name); + recv_partition_name_to_spec(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector & names) +int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_spec(const std::string& part_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_by_names", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("partition_name_to_spec", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_by_names_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.names = &names; + ThriftHiveMetastore_partition_name_to_spec_pargs args; + args.part_name = &part_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70324,7 +73088,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_names(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::map & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -70353,7 +73117,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vect iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_by_names") != 0) { + if (fname.compare("partition_name_to_spec") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70362,7 +73126,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vect using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partitions_by_names_presult result; + ThriftHiveMetastore_partition_name_to_spec_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -70377,12 +73141,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vect sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } // in a bad state, don't commit - throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_names failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_to_spec failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70392,22 +73152,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vect } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) +void ThriftHiveMetastoreConcurrentClient::markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { - int32_t seqid = send_alter_partition(db_name, tbl_name, new_part); - recv_alter_partition(seqid); + int32_t seqid = send_markPartitionForEvent(db_name, tbl_name, part_vals, eventType); + recv_markPartitionForEvent(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) +int32_t ThriftHiveMetastoreConcurrentClient::send_markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("markPartitionForEvent", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_partition_pargs args; + ThriftHiveMetastore_markPartitionForEvent_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.new_part = &new_part; + args.part_vals = &part_vals; + args.eventType = &eventType; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70418,7 +73179,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition(const std::str return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32_t seqid) { int32_t rseqid = 0; @@ -70447,7 +73208,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_partition") != 0) { + if (fname.compare("markPartitionForEvent") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70456,7 +73217,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_partition_presult result; + ThriftHiveMetastore_markPartitionForEvent_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70469,6 +73230,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seq sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } + if (result.__isset.o5) { + sentry.commit(); + throw result.o5; + } + if (result.__isset.o6) { + sentry.commit(); + throw result.o6; + 
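// Illustrative sketch (invented EventResult, not generated Thrift output):
// the plus side of the markPartitionForEvent / isPartitionMarkedForEvent
// hunks adds four more declared exception slots (o3..o6). The generated
// recv_* simply tests each __isset flag in field order and rethrows the
// first one that is set; the loop below shows that first-set-wins ordering.
#include <array>
#include <optional>
#include <stdexcept>
#include <string>

struct EventResult {
  // One slot per declared exception, in Thrift field order (o1..o6).
  std::array<std::optional<std::string>, 6> slots;
};

// Rethrow the first populated slot, mirroring the chain of
// "if (result.__isset.oN) { sentry.commit(); throw result.oN; }" checks.
void rethrowFirst(const EventResult& r) {
  for (const auto& slot : r.slots) {
    if (slot) throw std::runtime_error(*slot);
  }
}

int main() {
  EventResult r;
  r.slots[2] = "example declared exception";  // pretend the third slot is set
  try {
    rethrowFirst(r);
  } catch (const std::runtime_error& e) {
    return std::string(e.what()) == "example declared exception" ? 0 : 1;
  }
  return 1;
}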
} sentry.commit(); return; } @@ -70480,22 +73257,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seq } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) +bool ThriftHiveMetastoreConcurrentClient::isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { - int32_t seqid = send_alter_partitions(db_name, tbl_name, new_parts); - recv_alter_partitions(seqid); + int32_t seqid = send_isPartitionMarkedForEvent(db_name, tbl_name, part_vals, eventType); + return recv_isPartitionMarkedForEvent(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("isPartitionMarkedForEvent", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_partitions_pargs args; + ThriftHiveMetastore_isPartitionMarkedForEvent_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.new_parts = &new_parts; + args.part_vals = &part_vals; + args.eventType = &eventType; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70506,7 +73284,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions(const std::st return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const int32_t seqid) { int32_t rseqid = 0; @@ -70535,7 +73313,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t se iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_partitions") != 0) { + if (fname.compare("isPartitionMarkedForEvent") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70544,11 +73322,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t se using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_partitions_presult result; + bool _return; + ThriftHiveMetastore_isPartitionMarkedForEvent_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + sentry.commit(); + return _return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -70557,8 +73341,24 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t se sentry.commit(); throw result.o2; } - sentry.commit(); - return; + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } + if (result.__isset.o5) { + sentry.commit(); + throw result.o5; + } + if (result.__isset.o6) { + sentry.commit(); + throw result.o6; + } + // in a bad state, don't commit + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "isPartitionMarkedForEvent failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70568,23 +73368,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t se } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::add_index(Index& _return, const Index& new_index, const Table& index_table) { - int32_t seqid = send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); - recv_alter_partitions_with_environment_context(seqid); + int32_t seqid = send_add_index(new_index, index_table); + recv_add_index(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_index(const Index& new_index, const Table& index_table) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_index", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_partitions_with_environment_context_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.new_parts = &new_parts; - args.environment_context = &environment_context; + ThriftHiveMetastore_add_index_pargs args; + args.new_index = &new_index; + args.index_table = &index_table; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70595,7 +73393,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions_with_environm return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -70624,7 +73422,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_partitions_with_environment_context") != 0) { + if (fname.compare("add_index") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70633,11 +73431,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_partitions_with_environment_context_presult result; + ThriftHiveMetastore_add_index_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -70646,8 +73450,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment sentry.commit(); throw result.o2; } - sentry.commit(); - return; + if 
(result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_index failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70657,23 +73465,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) { - int32_t seqid = send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context); - recv_alter_partition_with_environment_context(seqid); + int32_t seqid = send_alter_index(dbname, base_tbl_name, idx_name, new_idx); + recv_alter_index(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_index", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_partition_with_environment_context_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.new_part = &new_part; - args.environment_context = &environment_context; + ThriftHiveMetastore_alter_index_pargs args; + args.dbname = &dbname; + args.base_tbl_name = &base_tbl_name; + args.idx_name = &idx_name; + args.new_idx = &new_idx; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70684,7 +73492,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition_with_environme return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) { int32_t rseqid = 0; @@ -70713,7 +73521,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_ iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_partition_with_environment_context") != 0) { + if (fname.compare("alter_index") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70722,7 +73530,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_ using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_partition_with_environment_context_presult result; + ThriftHiveMetastore_alter_index_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70746,23 +73554,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_ } // end while(true) } -void 
ThriftHiveMetastoreConcurrentClient::rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) +bool ThriftHiveMetastoreConcurrentClient::drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) { - int32_t seqid = send_rename_partition(db_name, tbl_name, part_vals, new_part); - recv_rename_partition(seqid); + int32_t seqid = send_drop_index_by_name(db_name, tbl_name, index_name, deleteData); + return recv_drop_index_by_name(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("rename_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_rename_partition_pargs args; + ThriftHiveMetastore_drop_index_by_name_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.new_part = &new_part; + args.index_name = &index_name; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70773,7 +73581,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_rename_partition(const std::st return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t seqid) { int32_t rseqid = 0; @@ -70802,7 +73610,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t se iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("rename_partition") != 0) { + if (fname.compare("drop_index_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70811,11 +73619,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t se using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_rename_partition_presult result; + bool _return; + ThriftHiveMetastore_drop_index_by_name_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + sentry.commit(); + return _return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -70824,8 +73638,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t se sentry.commit(); throw result.o2; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_index_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70835,21 +73649,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t se } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::partition_name_has_valid_characters(const std::vector & part_vals, const bool 
throw_exception) +void ThriftHiveMetastoreConcurrentClient::get_index_by_name(Index& _return, const std::string& db_name, const std::string& tbl_name, const std::string& index_name) { - int32_t seqid = send_partition_name_has_valid_characters(part_vals, throw_exception); - return recv_partition_name_has_valid_characters(seqid); + int32_t seqid = send_get_index_by_name(db_name, tbl_name, index_name); + recv_get_index_by_name(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("partition_name_has_valid_characters", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_partition_name_has_valid_characters_pargs args; - args.part_vals = &part_vals; - args.throw_exception = &throw_exception; + ThriftHiveMetastore_get_index_by_name_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.index_name = &index_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70860,7 +73675,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_has_valid_chara return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characters(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -70889,7 +73704,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characte iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("partition_name_has_valid_characters") != 0) { + if (fname.compare("get_index_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70898,23 +73713,27 @@ bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characte using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_partition_name_has_valid_characters_presult result; + ThriftHiveMetastore_get_index_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_has_valid_characters failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70924,21 +73743,22 @@ bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characte } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_config_value(std::string& _return, const std::string& name, const 
std::string& defaultValue) +void ThriftHiveMetastoreConcurrentClient::get_indexes(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) { - int32_t seqid = send_get_config_value(name, defaultValue); - recv_get_config_value(_return, seqid); + int32_t seqid = send_get_indexes(db_name, tbl_name, max_indexes); + recv_get_indexes(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_config_value(const std::string& name, const std::string& defaultValue) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_indexes(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_config_value", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_indexes", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_config_value_pargs args; - args.name = &name; - args.defaultValue = &defaultValue; + ThriftHiveMetastore_get_indexes_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.max_indexes = &max_indexes; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70949,7 +73769,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_config_value(const std::st return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -70978,7 +73798,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_config_value") != 0) { + if (fname.compare("get_indexes") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70987,7 +73807,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_config_value_presult result; + ThriftHiveMetastore_get_indexes_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -71002,8 +73822,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _re sentry.commit(); throw result.o1; } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_config_value failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_indexes failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71013,20 +73837,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _re } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::partition_name_to_vals(std::vector & _return, const std::string& part_name) +void ThriftHiveMetastoreConcurrentClient::get_index_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) { - int32_t seqid = send_partition_name_to_vals(part_name); - recv_partition_name_to_vals(_return, seqid); + int32_t seqid = 
send_get_index_names(db_name, tbl_name, max_indexes); + recv_get_index_names(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_vals(const std::string& part_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("partition_name_to_vals", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_index_names", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_partition_name_to_vals_pargs args; - args.part_name = &part_name; + ThriftHiveMetastore_get_index_names_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.max_indexes = &max_indexes; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71037,7 +73863,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_vals(const s return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_index_names(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -71066,7 +73892,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vecto iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("partition_name_to_vals") != 0) { + if (fname.compare("get_index_names") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71075,7 +73901,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vecto using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_partition_name_to_vals_presult result; + ThriftHiveMetastore_get_index_names_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -71086,12 +73912,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vecto sentry.commit(); return; } - if (result.__isset.o1) { + if (result.__isset.o2) { sentry.commit(); - throw result.o1; + throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_to_vals failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_names failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71101,20 +73927,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vecto } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::partition_name_to_spec(std::map & _return, const std::string& part_name) +void ThriftHiveMetastoreConcurrentClient::get_primary_keys(PrimaryKeysResponse& _return, const PrimaryKeysRequest& request) { - int32_t seqid = send_partition_name_to_spec(part_name); - recv_partition_name_to_spec(_return, seqid); + int32_t seqid = send_get_primary_keys(request); + recv_get_primary_keys(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_spec(const std::string& part_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_primary_keys(const PrimaryKeysRequest& request) { int32_t cseqid = 
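// Illustrative sketch (hypothetical SpecResult, not generated Thrift output):
// the value-returning recv_* bodies above (partition_name_to_spec,
// get_primary_keys, ...) point the presult's success field at the caller's
// _return object before calling result.read(iprot_), so deserialization
// writes straight into the caller-owned map/response without an extra copy.
// SpecResult below imitates that fill-through-pointer step.
#include <map>
#include <string>

struct SpecResult {
  std::map<std::string, std::string>* success = nullptr;  // caller-owned target
  bool isset_success = false;

  // Stand-in for presult::read(): fill the target in place from a fake wire.
  void read(const std::map<std::string, std::string>& wire) {
    if (!success) return;
    *success = wire;          // lands directly in the caller's _return
    isset_success = true;
  }
};

int main() {
  std::map<std::string, std::string> _return;  // caller's out-parameter
  SpecResult result;
  result.success = &_return;                   // same pattern as the generated code
  result.read({{"ds", "2017-10-01"}});
  return (result.isset_success && _return.at("ds") == "2017-10-01") ? 0 : 1;
}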
this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("partition_name_to_spec", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_partition_name_to_spec_pargs args; - args.part_name = &part_name; + ThriftHiveMetastore_get_primary_keys_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71125,7 +73951,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_spec(const s return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::map & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -71154,7 +73980,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::mapreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("partition_name_to_spec") != 0) { + if (fname.compare("get_primary_keys") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71163,7 +73989,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::mapreadMessageEnd(); @@ -71174,91 +74000,6 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::mapsync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -void ThriftHiveMetastoreConcurrentClient::markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) -{ - int32_t seqid = send_markPartitionForEvent(db_name, tbl_name, part_vals, eventType); - recv_markPartitionForEvent(seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("markPartitionForEvent", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_markPartitionForEvent_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.eventType = &eventType; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - sentry.commit(); - throw x; - } - if (mtype != 
::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("markPartitionForEvent") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); - } - ThriftHiveMetastore_markPartitionForEvent_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -71267,24 +74008,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32 sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } - if (result.__isset.o5) { - sentry.commit(); - throw result.o5; - } - if (result.__isset.o6) { - sentry.commit(); - throw result.o6; - } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_primary_keys failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71294,23 +74019,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32 } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) +void ThriftHiveMetastoreConcurrentClient::get_foreign_keys(ForeignKeysResponse& _return, const ForeignKeysRequest& request) { - int32_t seqid = send_isPartitionMarkedForEvent(db_name, tbl_name, part_vals, eventType); - return recv_isPartitionMarkedForEvent(seqid); + int32_t seqid = send_get_foreign_keys(request); + recv_get_foreign_keys(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_foreign_keys(const ForeignKeysRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("isPartitionMarkedForEvent", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_isPartitionMarkedForEvent_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.eventType = &eventType; + ThriftHiveMetastore_get_foreign_keys_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71321,7 +74043,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_isPartitionMarkedForEvent(cons return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(ForeignKeysResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -71350,7 +74072,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const i iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if 
(fname.compare("isPartitionMarkedForEvent") != 0) { + if (fname.compare("get_foreign_keys") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71359,16 +74081,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const i using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_isPartitionMarkedForEvent_presult result; + ThriftHiveMetastore_get_foreign_keys_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -71378,24 +74100,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const i sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } - if (result.__isset.o5) { - sentry.commit(); - throw result.o5; - } - if (result.__isset.o6) { - sentry.commit(); - throw result.o6; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "isPartitionMarkedForEvent failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_foreign_keys failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71405,21 +74111,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const i } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_index(Index& _return, const Index& new_index, const Table& index_table) +void ThriftHiveMetastoreConcurrentClient::get_unique_constraints(UniqueConstraintsResponse& _return, const UniqueConstraintsRequest& request) { - int32_t seqid = send_add_index(new_index, index_table); - recv_add_index(_return, seqid); + int32_t seqid = send_get_unique_constraints(request); + recv_get_unique_constraints(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_index(const Index& new_index, const Table& index_table) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_unique_constraints(const UniqueConstraintsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_index", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_unique_constraints", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_index_pargs args; - args.new_index = &new_index; - args.index_table = &index_table; + ThriftHiveMetastore_get_unique_constraints_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71430,7 +74135,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_index(const Index& new_ind return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_unique_constraints(UniqueConstraintsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -71459,7 +74164,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const i iprot_->readMessageEnd(); 
iprot_->getTransport()->readEnd(); } - if (fname.compare("add_index") != 0) { + if (fname.compare("get_unique_constraints") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71468,7 +74173,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const i using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_index_presult result; + ThriftHiveMetastore_get_unique_constraints_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -71487,12 +74192,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const i sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_index failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_unique_constraints failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71502,23 +74203,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const i } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) +void ThriftHiveMetastoreConcurrentClient::get_not_null_constraints(NotNullConstraintsResponse& _return, const NotNullConstraintsRequest& request) { - int32_t seqid = send_alter_index(dbname, base_tbl_name, idx_name, new_idx); - recv_alter_index(seqid); + int32_t seqid = send_get_not_null_constraints(request); + recv_get_not_null_constraints(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_not_null_constraints(const NotNullConstraintsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_index", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_not_null_constraints", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_index_pargs args; - args.dbname = &dbname; - args.base_tbl_name = &base_tbl_name; - args.idx_name = &idx_name; - args.new_idx = &new_idx; + ThriftHiveMetastore_get_not_null_constraints_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71529,7 +74227,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_index(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_not_null_constraints(NotNullConstraintsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -71558,7 +74256,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_index") != 0) { + if (fname.compare("get_not_null_constraints") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ 
-71567,11 +74265,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_index_presult result; + ThriftHiveMetastore_get_not_null_constraints_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -71580,8 +74284,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) sentry.commit(); throw result.o2; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_not_null_constraints failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71591,23 +74295,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) +bool ThriftHiveMetastoreConcurrentClient::update_table_column_statistics(const ColumnStatistics& stats_obj) { - int32_t seqid = send_drop_index_by_name(db_name, tbl_name, index_name, deleteData); - return recv_drop_index_by_name(seqid); + int32_t seqid = send_update_table_column_statistics(stats_obj); + return recv_update_table_column_statistics(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) +int32_t ThriftHiveMetastoreConcurrentClient::send_update_table_column_statistics(const ColumnStatistics& stats_obj) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("update_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_index_by_name_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.index_name = &index_name; - args.deleteData = &deleteData; + ThriftHiveMetastore_update_table_column_statistics_pargs args; + args.stats_obj = &stats_obj; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71618,7 +74319,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_index_by_name(const std:: return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(const int32_t seqid) { int32_t rseqid = 0; @@ -71647,7 +74348,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_index_by_name") != 0) { + if (fname.compare("update_table_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71657,7 +74358,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t throw TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - 
ThriftHiveMetastore_drop_index_by_name_presult result; + ThriftHiveMetastore_update_table_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -71675,8 +74376,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_index_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_table_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71686,22 +74395,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_index_by_name(Index& _return, const std::string& db_name, const std::string& tbl_name, const std::string& index_name) +bool ThriftHiveMetastoreConcurrentClient::update_partition_column_statistics(const ColumnStatistics& stats_obj) { - int32_t seqid = send_get_index_by_name(db_name, tbl_name, index_name); - recv_get_index_by_name(_return, seqid); + int32_t seqid = send_update_partition_column_statistics(stats_obj); + return recv_update_partition_column_statistics(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_update_partition_column_statistics(const ColumnStatistics& stats_obj) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("update_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_index_by_name_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.index_name = &index_name; + ThriftHiveMetastore_update_partition_column_statistics_pargs args; + args.stats_obj = &stats_obj; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71712,7 +74419,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_by_name(const std::s return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistics(const int32_t seqid) { int32_t rseqid = 0; @@ -71741,7 +74448,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_index_by_name") != 0) { + if (fname.compare("update_partition_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71750,16 +74457,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_index_by_name_presult result; + bool _return; + ThriftHiveMetastore_update_partition_column_statistics_presult result; 
result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -71769,8 +74476,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_partition_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71780,22 +74495,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_indexes(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) +void ThriftHiveMetastoreConcurrentClient::get_table_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& col_name) { - int32_t seqid = send_get_indexes(db_name, tbl_name, max_indexes); - recv_get_indexes(_return, seqid); + int32_t seqid = send_get_table_column_statistics(db_name, tbl_name, col_name); + recv_get_table_column_statistics(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_indexes(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_indexes", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_indexes_pargs args; + ThriftHiveMetastore_get_table_column_statistics_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.max_indexes = &max_indexes; + args.col_name = &col_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71806,7 +74521,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_indexes(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(ColumnStatistics& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -71835,7 +74550,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_indexes") != 0) { + if (fname.compare("get_table_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71844,7 +74559,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - 
ThriftHiveMetastore_get_indexes_presult result; + ThriftHiveMetastore_get_table_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -71863,98 +74578,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_indexes failed: unknown result"); - } - // seqid != rseqid - this->sync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -void ThriftHiveMetastoreConcurrentClient::get_index_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) -{ - int32_t seqid = send_get_index_names(db_name, tbl_name, max_indexes); - recv_get_index_names(_return, seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_index_names", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_get_index_names_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.max_indexes = &max_indexes; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -void ThriftHiveMetastoreConcurrentClient::recv_get_index_names(std::vector & _return, const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_index_names") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); - } - ThriftHiveMetastore_get_index_names_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o3) { sentry.commit(); - return; + throw result.o3; } - if (result.__isset.o2) { + if (result.__isset.o4) { sentry.commit(); - throw result.o2; + throw result.o4; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, 
"get_index_names failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71964,20 +74597,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_names(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_primary_keys_pargs args; - args.request = &request; + ThriftHiveMetastore_get_partition_column_statistics_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_name = &part_name; + args.col_name = &col_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71988,7 +74624,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_primary_keys(const Primary return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(ColumnStatistics& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -72017,7 +74653,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysRespo iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_primary_keys") != 0) { + if (fname.compare("get_partition_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72026,7 +74662,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysRespo using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_primary_keys_presult result; + ThriftHiveMetastore_get_partition_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -72045,100 +74681,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysRespo sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_primary_keys failed: unknown result"); - } - // seqid != rseqid - this->sync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -void ThriftHiveMetastoreConcurrentClient::get_foreign_keys(ForeignKeysResponse& _return, const ForeignKeysRequest& request) -{ - int32_t seqid = send_get_foreign_keys(request); - recv_get_foreign_keys(_return, seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_get_foreign_keys(const ForeignKeysRequest& request) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_get_foreign_keys_pargs args; - args.request = &request; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -void 
ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(ForeignKeysResponse& _return, const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_foreign_keys") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); - } - ThriftHiveMetastore_get_foreign_keys_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } - if (result.__isset.o1) { + if (result.__isset.o3) { sentry.commit(); - throw result.o1; + throw result.o3; } - if (result.__isset.o2) { + if (result.__isset.o4) { sentry.commit(); - throw result.o2; + throw result.o4; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_foreign_keys failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -72148,19 +74700,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(ForeignKeysRespo } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_unique_constraints(UniqueConstraintsResponse& _return, const UniqueConstraintsRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_table_statistics_req(TableStatsResult& _return, const TableStatsRequest& request) { - int32_t seqid = send_get_unique_constraints(request); - recv_get_unique_constraints(_return, seqid); + int32_t seqid = send_get_table_statistics_req(request); + recv_get_table_statistics_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_unique_constraints(const UniqueConstraintsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_statistics_req(const TableStatsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_unique_constraints", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_unique_constraints_pargs args; + ThriftHiveMetastore_get_table_statistics_req_pargs args; args.request = &request; args.write(oprot_); @@ -72172,7 +74724,7 @@ 
int32_t ThriftHiveMetastoreConcurrentClient::send_get_unique_constraints(const U return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_unique_constraints(UniqueConstraintsResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableStatsResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -72201,7 +74753,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_unique_constraints(UniqueCons iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_unique_constraints") != 0) { + if (fname.compare("get_table_statistics_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72210,7 +74762,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_unique_constraints(UniqueCons using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_unique_constraints_presult result; + ThriftHiveMetastore_get_table_statistics_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -72230,7 +74782,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_unique_constraints(UniqueCons throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_unique_constraints failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_statistics_req failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -72240,19 +74792,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_unique_constraints(UniqueCons } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_not_null_constraints(NotNullConstraintsResponse& _return, const NotNullConstraintsRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_partitions_statistics_req(PartitionsStatsResult& _return, const PartitionsStatsRequest& request) { - int32_t seqid = send_get_not_null_constraints(request); - recv_get_not_null_constraints(_return, seqid); + int32_t seqid = send_get_partitions_statistics_req(request); + recv_get_partitions_statistics_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_not_null_constraints(const NotNullConstraintsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_statistics_req(const PartitionsStatsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_not_null_constraints", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_not_null_constraints_pargs args; + ThriftHiveMetastore_get_partitions_statistics_req_pargs args; args.request = &request; args.write(oprot_); @@ -72264,7 +74816,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_not_null_constraints(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_not_null_constraints(NotNullConstraintsResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(PartitionsStatsResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -72293,7 +74845,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_not_null_constraints(NotNullC iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_not_null_constraints") != 0) { + if (fname.compare("get_partitions_statistics_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72302,7 +74854,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_not_null_constraints(NotNullC using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_not_null_constraints_presult result; + ThriftHiveMetastore_get_partitions_statistics_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -72322,7 +74874,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_not_null_constraints(NotNullC throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_not_null_constraints failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_statistics_req failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -72332,20 +74884,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_not_null_constraints(NotNullC } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::update_table_column_statistics(const ColumnStatistics& stats_obj) +void ThriftHiveMetastoreConcurrentClient::get_aggr_stats_for(AggrStats& _return, const PartitionsStatsRequest& request) { - int32_t seqid = send_update_table_column_statistics(stats_obj); - return recv_update_table_column_statistics(seqid); + int32_t seqid = send_get_aggr_stats_for(request); + recv_get_aggr_stats_for(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_update_table_column_statistics(const ColumnStatistics& stats_obj) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_aggr_stats_for(const PartitionsStatsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("update_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_update_table_column_statistics_pargs args; - args.stats_obj = &stats_obj; + ThriftHiveMetastore_get_aggr_stats_for_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -72356,7 +74908,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_update_table_column_statistics return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -72385,7 +74937,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(co iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("update_table_column_statistics") != 0) { + if (fname.compare("get_aggr_stats_for") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72394,16 +74946,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(co using 
::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_update_table_column_statistics_presult result; + ThriftHiveMetastore_get_aggr_stats_for_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -72413,16 +74965,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(co sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_table_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_aggr_stats_for failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -72432,20 +74976,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(co } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::update_partition_column_statistics(const ColumnStatistics& stats_obj) +bool ThriftHiveMetastoreConcurrentClient::set_aggr_stats_for(const SetPartitionsStatsRequest& request) { - int32_t seqid = send_update_partition_column_statistics(stats_obj); - return recv_update_partition_column_statistics(seqid); + int32_t seqid = send_set_aggr_stats_for(request); + return recv_set_aggr_stats_for(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_update_partition_column_statistics(const ColumnStatistics& stats_obj) +int32_t ThriftHiveMetastoreConcurrentClient::send_set_aggr_stats_for(const SetPartitionsStatsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("update_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("set_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_update_partition_column_statistics_pargs args; - args.stats_obj = &stats_obj; + ThriftHiveMetastore_set_aggr_stats_for_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -72456,7 +75000,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_update_partition_column_statis return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistics(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const int32_t seqid) { int32_t rseqid = 0; @@ -72485,7 +75029,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistic iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("update_partition_column_statistics") != 0) { + if (fname.compare("set_aggr_stats_for") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72495,7 +75039,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistic throw TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - ThriftHiveMetastore_update_partition_column_statistics_presult 
result; + ThriftHiveMetastore_set_aggr_stats_for_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -72522,7 +75066,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistic throw result.o4; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_partition_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_aggr_stats_for failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -72532,21 +75076,22 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistic } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +bool ThriftHiveMetastoreConcurrentClient::delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) { - int32_t seqid = send_get_table_column_statistics(db_name, tbl_name, col_name); - recv_get_table_column_statistics(_return, seqid); + int32_t seqid = send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name); + return recv_delete_partition_column_statistics(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("delete_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_column_statistics_pargs args; + ThriftHiveMetastore_delete_partition_column_statistics_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; + args.part_name = &part_name; args.col_name = &col_name; args.write(oprot_); @@ -72558,7 +75103,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_column_statistics(co return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(ColumnStatistics& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistics(const int32_t seqid) { int32_t rseqid = 0; @@ -72587,7 +75132,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(Colum iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_column_statistics") != 0) { + if (fname.compare("delete_partition_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72596,16 +75141,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(Colum using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_column_statistics_presult result; + bool _return; + 
ThriftHiveMetastore_delete_partition_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -72624,7 +75169,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(Colum throw result.o4; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_partition_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -72634,22 +75179,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(Colum } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partition_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) +bool ThriftHiveMetastoreConcurrentClient::delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) { - int32_t seqid = send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name); - recv_get_partition_column_statistics(_return, seqid); + int32_t seqid = send_delete_table_column_statistics(db_name, tbl_name, col_name); + return recv_delete_table_column_statistics(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("delete_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_column_statistics_pargs args; + ThriftHiveMetastore_delete_table_column_statistics_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_name = &part_name; args.col_name = &col_name; args.write(oprot_); @@ -72661,7 +75205,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_column_statistic return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(ColumnStatistics& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(const int32_t seqid) { int32_t rseqid = 0; @@ -72690,7 +75234,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(C iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_column_statistics") != 0) { + if (fname.compare("delete_table_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72699,16 +75243,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(C using 
::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partition_column_statistics_presult result; + bool _return; + ThriftHiveMetastore_delete_table_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -72727,7 +75271,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(C throw result.o4; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_table_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -72737,20 +75281,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(C } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_statistics_req(TableStatsResult& _return, const TableStatsRequest& request) +void ThriftHiveMetastoreConcurrentClient::create_function(const Function& func) { - int32_t seqid = send_get_table_statistics_req(request); - recv_get_table_statistics_req(_return, seqid); + int32_t seqid = send_create_function(func); + recv_create_function(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_statistics_req(const TableStatsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_function(const Function& func) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_function", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_statistics_req_pargs args; - args.request = &request; + ThriftHiveMetastore_create_function_pargs args; + args.func = &func; args.write(oprot_); oprot_->writeMessageEnd(); @@ -72761,7 +75305,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_statistics_req(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableStatsResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_create_function(const int32_t seqid) { int32_t rseqid = 0; @@ -72790,7 +75334,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableSta iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_statistics_req") != 0) { + if (fname.compare("create_function") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72799,17 +75343,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableSta using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_statistics_req_presult result; - result.success = &_return; + ThriftHiveMetastore_create_function_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // 
_return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -72818,8 +75356,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableSta sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_statistics_req failed: unknown result"); + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -72829,20 +75375,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableSta } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partitions_statistics_req(PartitionsStatsResult& _return, const PartitionsStatsRequest& request) +void ThriftHiveMetastoreConcurrentClient::drop_function(const std::string& dbName, const std::string& funcName) { - int32_t seqid = send_get_partitions_statistics_req(request); - recv_get_partitions_statistics_req(_return, seqid); + int32_t seqid = send_drop_function(dbName, funcName); + recv_drop_function(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_statistics_req(const PartitionsStatsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_function(const std::string& dbName, const std::string& funcName) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_function", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_statistics_req_pargs args; - args.request = &request; + ThriftHiveMetastore_drop_function_pargs args; + args.dbName = &dbName; + args.funcName = &funcName; args.write(oprot_); oprot_->writeMessageEnd(); @@ -72853,7 +75400,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_statistics_req( return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(PartitionsStatsResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_drop_function(const int32_t seqid) { int32_t rseqid = 0; @@ -72882,7 +75429,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(Par iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_statistics_req") != 0) { + if (fname.compare("drop_function") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72891,27 +75438,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(Par using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partitions_statistics_req_presult result; - result.success = &_return; + ThriftHiveMetastore_drop_function_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o2) { + if (result.__isset.o3) { sentry.commit(); - throw result.o2; + throw 
result.o3; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_statistics_req failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -72921,20 +75462,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(Par } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_aggr_stats_for(AggrStats& _return, const PartitionsStatsRequest& request) +void ThriftHiveMetastoreConcurrentClient::alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) { - int32_t seqid = send_get_aggr_stats_for(request); - recv_get_aggr_stats_for(_return, seqid); + int32_t seqid = send_alter_function(dbName, funcName, newFunc); + recv_alter_function(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_aggr_stats_for(const PartitionsStatsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_function", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_aggr_stats_for_pargs args; - args.request = &request; + ThriftHiveMetastore_alter_function_pargs args; + args.dbName = &dbName; + args.funcName = &funcName; + args.newFunc = &newFunc; args.write(oprot_); oprot_->writeMessageEnd(); @@ -72945,7 +75488,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_aggr_stats_for(const Parti return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_function(const int32_t seqid) { int32_t rseqid = 0; @@ -72974,7 +75517,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_aggr_stats_for") != 0) { + if (fname.compare("alter_function") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72983,17 +75526,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_aggr_stats_for_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_function_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -73002,8 +75539,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _re sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_aggr_stats_for failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73013,20 +75550,21 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _re } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::set_aggr_stats_for(const SetPartitionsStatsRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_functions(std::vector & _return, const std::string& dbName, const std::string& pattern) { - int32_t seqid = send_set_aggr_stats_for(request); - return recv_set_aggr_stats_for(seqid); + int32_t seqid = send_get_functions(dbName, pattern); + recv_get_functions(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_set_aggr_stats_for(const SetPartitionsStatsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_functions(const std::string& dbName, const std::string& pattern) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("set_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_functions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_set_aggr_stats_for_pargs args; - args.request = &request; + ThriftHiveMetastore_get_functions_pargs args; + args.dbName = &dbName; + args.pattern = &pattern; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73037,7 +75575,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_set_aggr_stats_for(const SetPa return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73066,7 +75604,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const int32_t iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("set_aggr_stats_for") != 0) { + if (fname.compare("get_functions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73075,35 +75613,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const int32_t using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_set_aggr_stats_for_presult result; + ThriftHiveMetastore_get_functions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_aggr_stats_for failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_functions failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73113,23 +75639,21 @@ bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const int32_t } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const 
std::string& col_name) +void ThriftHiveMetastoreConcurrentClient::get_function(Function& _return, const std::string& dbName, const std::string& funcName) { - int32_t seqid = send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name); - return recv_delete_partition_column_statistics(seqid); + int32_t seqid = send_get_function(dbName, funcName); + recv_get_function(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_function(const std::string& dbName, const std::string& funcName) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("delete_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_function", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_delete_partition_column_statistics_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.col_name = &col_name; + ThriftHiveMetastore_get_function_pargs args; + args.dbName = &dbName; + args.funcName = &funcName; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73140,7 +75664,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_delete_partition_column_statis return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistics(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73169,7 +75693,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistic iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("delete_partition_column_statistics") != 0) { + if (fname.compare("get_function") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73178,16 +75702,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistic using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_delete_partition_column_statistics_presult result; + ThriftHiveMetastore_get_function_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -73197,16 +75721,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistic sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_partition_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_function failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73216,22 +75732,19 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistic } 
// end while(true) } -bool ThriftHiveMetastoreConcurrentClient::delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +void ThriftHiveMetastoreConcurrentClient::get_all_functions(GetAllFunctionsResponse& _return) { - int32_t seqid = send_delete_table_column_statistics(db_name, tbl_name, col_name); - return recv_delete_table_column_statistics(seqid); + int32_t seqid = send_get_all_functions(); + recv_get_all_functions(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_functions() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("delete_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_functions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_delete_table_column_statistics_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.col_name = &col_name; + ThriftHiveMetastore_get_all_functions_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73242,7 +75755,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_delete_table_column_statistics return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctionsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73271,7 +75784,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(co iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("delete_table_column_statistics") != 0) { + if (fname.compare("get_all_functions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73280,35 +75793,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(co using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_delete_table_column_statistics_presult result; + ThriftHiveMetastore_get_all_functions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_table_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_functions failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73318,20 +75819,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(co } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::create_function(const Function& func) 
+bool ThriftHiveMetastoreConcurrentClient::create_role(const Role& role) { - int32_t seqid = send_create_function(func); - recv_create_function(seqid); + int32_t seqid = send_create_role(role); + return recv_create_role(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_function(const Function& func) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_role(const Role& role) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_function", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_function_pargs args; - args.func = &func; + ThriftHiveMetastore_create_role_pargs args; + args.role = &role; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73342,7 +75843,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_function(const Function return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_create_function(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_create_role(const int32_t seqid) { int32_t rseqid = 0; @@ -73371,7 +75872,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_function(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_function") != 0) { + if (fname.compare("create_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73380,29 +75881,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_function(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_create_function_presult result; + bool _return; + ThriftHiveMetastore_create_role_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { + if (result.__isset.success) { sentry.commit(); - throw result.o3; + return _return; } - if (result.__isset.o4) { + if (result.__isset.o1) { sentry.commit(); - throw result.o4; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73412,21 +75907,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_function(const int32_t seq } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_function(const std::string& dbName, const std::string& funcName) +bool ThriftHiveMetastoreConcurrentClient::drop_role(const std::string& role_name) { - int32_t seqid = send_drop_function(dbName, funcName); - recv_drop_function(seqid); + int32_t seqid = send_drop_role(role_name); + return recv_drop_role(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_function(const std::string& dbName, const std::string& funcName) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_role(const std::string& role_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_function", 
::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_function_pargs args; - args.dbName = &dbName; - args.funcName = &funcName; + ThriftHiveMetastore_drop_role_pargs args; + args.role_name = &role_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73437,7 +75931,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_function(const std::strin return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_function(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_role(const int32_t seqid) { int32_t rseqid = 0; @@ -73466,7 +75960,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_function(const int32_t seqid iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_function") != 0) { + if (fname.compare("drop_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73475,21 +75969,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_function(const int32_t seqid using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_function_presult result; + bool _return; + ThriftHiveMetastore_drop_role_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { sentry.commit(); - throw result.o1; + return _return; } - if (result.__isset.o3) { + if (result.__isset.o1) { sentry.commit(); - throw result.o3; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73499,22 +75995,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_function(const int32_t seqid } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) +void ThriftHiveMetastoreConcurrentClient::get_role_names(std::vector & _return) { - int32_t seqid = send_alter_function(dbName, funcName, newFunc); - recv_alter_function(seqid); + int32_t seqid = send_get_role_names(); + recv_get_role_names(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_names() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_function", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_role_names", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_function_pargs args; - args.dbName = &dbName; - args.funcName = &funcName; - args.newFunc = &newFunc; + ThriftHiveMetastore_get_role_names_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73525,7 +76018,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_function(const std::stri return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_function(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vector & _return, const int32_t 
seqid) { int32_t rseqid = 0; @@ -73554,7 +76047,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_function(const int32_t seqi iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_function") != 0) { + if (fname.compare("get_role_names") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73563,21 +76056,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_function(const int32_t seqi using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_function_presult result; + ThriftHiveMetastore_get_role_names_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o2) { + if (result.__isset.o1) { sentry.commit(); - throw result.o2; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_role_names failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73587,21 +76082,25 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_function(const int32_t seqi } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_functions(std::vector & _return, const std::string& dbName, const std::string& pattern) +bool ThriftHiveMetastoreConcurrentClient::grant_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type, const std::string& grantor, const PrincipalType::type grantorType, const bool grant_option) { - int32_t seqid = send_get_functions(dbName, pattern); - recv_get_functions(_return, seqid); + int32_t seqid = send_grant_role(role_name, principal_name, principal_type, grantor, grantorType, grant_option); + return recv_grant_role(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_functions(const std::string& dbName, const std::string& pattern) +int32_t ThriftHiveMetastoreConcurrentClient::send_grant_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type, const std::string& grantor, const PrincipalType::type grantorType, const bool grant_option) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_functions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_functions_pargs args; - args.dbName = &dbName; - args.pattern = &pattern; + ThriftHiveMetastore_grant_role_pargs args; + args.role_name = &role_name; + args.principal_name = &principal_name; + args.principal_type = &principal_type; + args.grantor = &grantor; + args.grantorType = &grantorType; + args.grant_option = &grant_option; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73612,7 +76111,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_functions(const std::strin return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vector & _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_grant_role(const int32_t seqid) { int32_t rseqid = 0; @@ 
-73641,7 +76140,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_functions") != 0) { + if (fname.compare("grant_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73650,23 +76149,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_functions failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73676,21 +76175,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_function", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_function_pargs args; - args.dbName = &dbName; - args.funcName = &funcName; + ThriftHiveMetastore_revoke_role_pargs args; + args.role_name = &role_name; + args.principal_name = &principal_name; + args.principal_type = &principal_type; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73701,7 +76201,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_function(const std::string return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) { int32_t rseqid = 0; @@ -73730,7 +76230,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, c iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_function") != 0) { + if (fname.compare("revoke_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73739,27 +76239,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, c using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_function_presult result; + bool _return; + ThriftHiveMetastore_revoke_role_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_function failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73769,19 +76265,21 @@ 
void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, c } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_all_functions(GetAllFunctionsResponse& _return) +void ThriftHiveMetastoreConcurrentClient::list_roles(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type) { - int32_t seqid = send_get_all_functions(); - recv_get_all_functions(_return, seqid); + int32_t seqid = send_list_roles(principal_name, principal_type); + recv_list_roles(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_functions() +int32_t ThriftHiveMetastoreConcurrentClient::send_list_roles(const std::string& principal_name, const PrincipalType::type principal_type) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_all_functions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("list_roles", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_functions_pargs args; + ThriftHiveMetastore_list_roles_pargs args; + args.principal_name = &principal_name; + args.principal_type = &principal_type; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73792,7 +76290,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_functions() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctionsResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73821,7 +76319,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctions iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_functions") != 0) { + if (fname.compare("list_roles") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73830,7 +76328,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctions using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_all_functions_presult result; + ThriftHiveMetastore_list_roles_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -73846,7 +76344,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctions throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_functions failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "list_roles failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73856,20 +76354,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctions } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::create_role(const Role& role) +void ThriftHiveMetastoreConcurrentClient::grant_revoke_role(GrantRevokeRoleResponse& _return, const GrantRevokeRoleRequest& request) { - int32_t seqid = send_create_role(role); - return recv_create_role(seqid); + int32_t seqid = send_grant_revoke_role(request); + recv_grant_revoke_role(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_role(const Role& role) +int32_t 
ThriftHiveMetastoreConcurrentClient::send_grant_revoke_role(const GrantRevokeRoleRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_role_pargs args; - args.role = &role; + ThriftHiveMetastore_grant_revoke_role_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73880,7 +76378,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_role(const Role& role) return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_create_role(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRoleResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73909,7 +76407,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_role(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_role") != 0) { + if (fname.compare("grant_revoke_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73918,23 +76416,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_role(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_create_role_presult result; + ThriftHiveMetastore_grant_revoke_role_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73944,20 +76442,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_role(const int32_t seqid) } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_role(const std::string& role_name) +void ThriftHiveMetastoreConcurrentClient::get_principals_in_role(GetPrincipalsInRoleResponse& _return, const GetPrincipalsInRoleRequest& request) { - int32_t seqid = send_drop_role(role_name); - return recv_drop_role(seqid); + int32_t seqid = send_get_principals_in_role(request); + recv_get_principals_in_role(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_role(const std::string& role_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_principals_in_role(const GetPrincipalsInRoleRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_principals_in_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_role_pargs args; - args.role_name = &role_name; + ThriftHiveMetastore_get_principals_in_role_pargs args; + args.request = &request; 
args.write(oprot_); oprot_->writeMessageEnd(); @@ -73968,7 +76466,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_role(const std::string& r return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_role(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincipalsInRoleResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73997,7 +76495,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_role(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_role") != 0) { + if (fname.compare("get_principals_in_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74006,23 +76504,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_role(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_role_presult result; + ThriftHiveMetastore_get_principals_in_role_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_principals_in_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -74032,19 +76530,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_role(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_role_names(std::vector & _return) +void ThriftHiveMetastoreConcurrentClient::get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const GetRoleGrantsForPrincipalRequest& request) { - int32_t seqid = send_get_role_names(); - recv_get_role_names(_return, seqid); + int32_t seqid = send_get_role_grants_for_principal(request); + recv_get_role_grants_for_principal(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_names() +int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_grants_for_principal(const GetRoleGrantsForPrincipalRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_role_names", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_role_grants_for_principal", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_role_names_pargs args; + ThriftHiveMetastore_get_role_grants_for_principal_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74055,7 +76554,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_names() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -74084,7 +76583,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_role_names") != 0) { + if (fname.compare("get_role_grants_for_principal") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74093,7 +76592,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vectorreadMessageEnd(); @@ -74109,7 +76608,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -74119,25 +76618,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vector & group_names) { - int32_t seqid = send_grant_role(role_name, principal_name, principal_type, grantor, grantorType, grant_option); - return recv_grant_role(seqid); + int32_t seqid = send_get_privilege_set(hiveObject, user_name, group_names); + recv_get_privilege_set(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_grant_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type, const std::string& grantor, const PrincipalType::type grantorType, const bool grant_option) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_privilege_set(const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector & group_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("grant_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_privilege_set", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_grant_role_pargs args; - args.role_name = &role_name; - args.principal_name = &principal_name; - args.principal_type = &principal_type; - args.grantor = &grantor; - args.grantorType = &grantorType; - args.grant_option = &grant_option; + ThriftHiveMetastore_get_privilege_set_pargs args; + args.hiveObject = &hiveObject; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74148,7 +76644,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_grant_role(const std::string& return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_grant_role(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivilegeSet& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -74177,7 +76673,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_role(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("grant_role") != 0) { + if (fname.compare("get_privilege_set") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74186,23 +76682,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_role(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_grant_role_presult result; + ThriftHiveMetastore_get_privilege_set_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_privilege_set failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -74212,22 +76708,22 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_role(const int32_t seqid) } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::revoke_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type) +void ThriftHiveMetastoreConcurrentClient::list_privileges(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) { - int32_t seqid = send_revoke_role(role_name, principal_name, principal_type); - return recv_revoke_role(seqid); + int32_t seqid = send_list_privileges(principal_name, principal_type, hiveObject); + recv_list_privileges(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type) +int32_t ThriftHiveMetastoreConcurrentClient::send_list_privileges(const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("list_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_revoke_role_pargs args; - args.role_name = &role_name; + ThriftHiveMetastore_list_privileges_pargs args; args.principal_name = &principal_name; args.principal_type = &principal_type; + args.hiveObject = &hiveObject; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74238,7 +76734,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_role(const std::string& return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -74267,7 +76763,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("revoke_role") != 0) { + if (fname.compare("list_privileges") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74276,23 +76772,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_revoke_role_presult result; + ThriftHiveMetastore_list_privileges_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_role failed: unknown result"); + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "list_privileges failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -74302,21 +76798,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::list_roles(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type) +bool ThriftHiveMetastoreConcurrentClient::grant_privileges(const PrivilegeBag& privileges) { - int32_t seqid = send_list_roles(principal_name, principal_type); - recv_list_roles(_return, seqid); + int32_t seqid = send_grant_privileges(privileges); + return recv_grant_privileges(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_list_roles(const std::string& principal_name, const PrincipalType::type principal_type) +int32_t ThriftHiveMetastoreConcurrentClient::send_grant_privileges(const PrivilegeBag& privileges) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("list_roles", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_list_roles_pargs args; - args.principal_name = &principal_name; - args.principal_type = &principal_type; + ThriftHiveMetastore_grant_privileges_pargs args; + args.privileges = &privileges; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74327,7 +76822,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_list_roles(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t seqid) { int32_t rseqid = 0; @@ -74356,7 +76851,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _r iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("list_roles") != 0) { + if (fname.compare("grant_privileges") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74365,23 +76860,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _r using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_list_roles_presult result; + bool _return; + ThriftHiveMetastore_grant_privileges_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "list_roles failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_privileges failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -74391,20 +76886,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _r } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::grant_revoke_role(GrantRevokeRoleResponse& _return, const GrantRevokeRoleRequest& request) 
+bool ThriftHiveMetastoreConcurrentClient::revoke_privileges(const PrivilegeBag& privileges) { - int32_t seqid = send_grant_revoke_role(request); - recv_grant_revoke_role(_return, seqid); + int32_t seqid = send_revoke_privileges(privileges); + return recv_revoke_privileges(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_grant_revoke_role(const GrantRevokeRoleRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_privileges(const PrivilegeBag& privileges) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("grant_revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("revoke_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_grant_revoke_role_pargs args; - args.request = &request; + ThriftHiveMetastore_revoke_privileges_pargs args; + args.privileges = &privileges; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74415,7 +76910,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_grant_revoke_role(const GrantR return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRoleResponse& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t seqid) { int32_t rseqid = 0; @@ -74444,7 +76939,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRole iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("grant_revoke_role") != 0) { + if (fname.compare("revoke_privileges") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74453,23 +76948,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRole using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_grant_revoke_role_presult result; + bool _return; + ThriftHiveMetastore_revoke_privileges_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_privileges failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -74479,19 +76974,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRole } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_principals_in_role(GetPrincipalsInRoleResponse& _return, const GetPrincipalsInRoleRequest& request) +void ThriftHiveMetastoreConcurrentClient::grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const GrantRevokePrivilegeRequest& request) { - int32_t seqid = send_get_principals_in_role(request); - recv_get_principals_in_role(_return, seqid); + int32_t seqid = send_grant_revoke_privileges(request); + recv_grant_revoke_privileges(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_principals_in_role(const GetPrincipalsInRoleRequest& request) +int32_t 
ThriftHiveMetastoreConcurrentClient::send_grant_revoke_privileges(const GrantRevokePrivilegeRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_principals_in_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_revoke_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_principals_in_role_pargs args; + ThriftHiveMetastore_grant_revoke_privileges_pargs args; args.request = &request; args.write(oprot_); @@ -74503,7 +76998,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_principals_in_role(const G return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincipalsInRoleResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -74532,7 +77027,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincip iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_principals_in_role") != 0) { + if (fname.compare("grant_revoke_privileges") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74541,7 +77036,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincip using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_principals_in_role_presult result; + ThriftHiveMetastore_grant_revoke_privileges_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -74557,7 +77052,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincip throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_principals_in_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_privileges failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -74567,20 +77062,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincip } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const GetRoleGrantsForPrincipalRequest& request) +void ThriftHiveMetastoreConcurrentClient::set_ugi(std::vector & _return, const std::string& user_name, const std::vector & group_names) { - int32_t seqid = send_get_role_grants_for_principal(request); - recv_get_role_grants_for_principal(_return, seqid); + int32_t seqid = send_set_ugi(user_name, group_names); + recv_set_ugi(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_grants_for_principal(const GetRoleGrantsForPrincipalRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_set_ugi(const std::string& user_name, const std::vector & group_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_role_grants_for_principal", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("set_ugi", ::apache::thrift::protocol::T_CALL, cseqid); - 
ThriftHiveMetastore_get_role_grants_for_principal_pargs args; - args.request = &request; + ThriftHiveMetastore_set_ugi_pargs args; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74591,7 +77087,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_grants_for_principal( return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -74620,7 +77116,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(Get iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_role_grants_for_principal") != 0) { + if (fname.compare("set_ugi") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74629,7 +77125,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(Get using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_role_grants_for_principal_presult result; + ThriftHiveMetastore_set_ugi_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -74645,7 +77141,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(Get throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_role_grants_for_principal failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_ugi failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -74655,22 +77151,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(Get } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_privilege_set(PrincipalPrivilegeSet& _return, const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreConcurrentClient::get_delegation_token(std::string& _return, const std::string& token_owner, const std::string& renewer_kerberos_principal_name) { - int32_t seqid = send_get_privilege_set(hiveObject, user_name, group_names); - recv_get_privilege_set(_return, seqid); + int32_t seqid = send_get_delegation_token(token_owner, renewer_kerberos_principal_name); + recv_get_delegation_token(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_privilege_set(const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector & group_names) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_delegation_token(const std::string& token_owner, const std::string& renewer_kerberos_principal_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_privilege_set", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_privilege_set_pargs args; - args.hiveObject = &hiveObject; - args.user_name = &user_name; - args.group_names = &group_names; + ThriftHiveMetastore_get_delegation_token_pargs args; + args.token_owner = 
&token_owner; + args.renewer_kerberos_principal_name = &renewer_kerberos_principal_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74681,7 +77176,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_privilege_set(const HiveOb return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivilegeSet& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -74710,7 +77205,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivil iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_privilege_set") != 0) { + if (fname.compare("get_delegation_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74719,7 +77214,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivil using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_privilege_set_presult result; + ThriftHiveMetastore_get_delegation_token_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -74735,7 +77230,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivil throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_privilege_set failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_delegation_token failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -74745,22 +77240,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivil } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::list_privileges(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) +int64_t ThriftHiveMetastoreConcurrentClient::renew_delegation_token(const std::string& token_str_form) { - int32_t seqid = send_list_privileges(principal_name, principal_type, hiveObject); - recv_list_privileges(_return, seqid); + int32_t seqid = send_renew_delegation_token(token_str_form); + return recv_renew_delegation_token(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_list_privileges(const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) +int32_t ThriftHiveMetastoreConcurrentClient::send_renew_delegation_token(const std::string& token_str_form) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("list_privileges", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("renew_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_list_privileges_pargs args; - args.principal_name = &principal_name; - args.principal_type = &principal_type; - args.hiveObject = &hiveObject; + ThriftHiveMetastore_renew_delegation_token_pargs args; + args.token_str_form = &token_str_form; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74771,7 +77264,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_list_privileges(const std::str return cseqid; } -void 
ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vector & _return, const int32_t seqid) +int64_t ThriftHiveMetastoreConcurrentClient::recv_renew_delegation_token(const int32_t seqid) { int32_t rseqid = 0; @@ -74800,7 +77293,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("list_privileges") != 0) { + if (fname.compare("renew_delegation_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74809,23 +77302,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "list_privileges failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "renew_delegation_token failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -74835,20 +77328,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("grant_privileges", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("cancel_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_grant_privileges_pargs args; - args.privileges = &privileges; + ThriftHiveMetastore_cancel_delegation_token_pargs args; + args.token_str_form = &token_str_form; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74859,7 +77352,90 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_grant_privileges(const Privile return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_cancel_delegation_token(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("cancel_delegation_token") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_cancel_delegation_token_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + 
iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +bool ThriftHiveMetastoreConcurrentClient::add_token(const std::string& token_identifier, const std::string& delegation_token) +{ + int32_t seqid = send_add_token(token_identifier, delegation_token); + return recv_add_token(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_add_token(const std::string& token_identifier, const std::string& delegation_token) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("add_token", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_add_token_pargs args; + args.token_identifier = &token_identifier; + args.delegation_token = &delegation_token; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +bool ThriftHiveMetastoreConcurrentClient::recv_add_token(const int32_t seqid) { int32_t rseqid = 0; @@ -74888,7 +77464,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t se iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("grant_privileges") != 0) { + if (fname.compare("add_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74898,7 +77474,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t se throw TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - ThriftHiveMetastore_grant_privileges_presult result; + ThriftHiveMetastore_add_token_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -74908,12 +77484,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t se sentry.commit(); return _return; } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_privileges failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_token failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -74923,20 +77495,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t se } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::revoke_privileges(const PrivilegeBag& privileges) +bool ThriftHiveMetastoreConcurrentClient::remove_token(const std::string& token_identifier) { - int32_t seqid = send_revoke_privileges(privileges); - return recv_revoke_privileges(seqid); + int32_t seqid = send_remove_token(token_identifier); + return recv_remove_token(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_privileges(const PrivilegeBag& privileges) +int32_t ThriftHiveMetastoreConcurrentClient::send_remove_token(const std::string& token_identifier) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("revoke_privileges", 
::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("remove_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_revoke_privileges_pargs args; - args.privileges = &privileges; + ThriftHiveMetastore_remove_token_pargs args; + args.token_identifier = &token_identifier; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74947,7 +77519,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_privileges(const Privil return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) { int32_t rseqid = 0; @@ -74976,7 +77548,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("revoke_privileges") != 0) { + if (fname.compare("remove_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74986,7 +77558,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t s throw TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - ThriftHiveMetastore_revoke_privileges_presult result; + ThriftHiveMetastore_remove_token_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -74996,12 +77568,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t s sentry.commit(); return _return; } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_privileges failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_token failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -75011,20 +77579,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t s } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const GrantRevokePrivilegeRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_token(std::string& _return, const std::string& token_identifier) { - int32_t seqid = send_grant_revoke_privileges(request); - recv_grant_revoke_privileges(_return, seqid); + int32_t seqid = send_get_token(token_identifier); + recv_get_token(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_grant_revoke_privileges(const GrantRevokePrivilegeRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_token(const std::string& token_identifier) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("grant_revoke_privileges", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_grant_revoke_privileges_pargs args; - args.request = &request; + ThriftHiveMetastore_get_token_pargs args; + args.token_identifier = &token_identifier; args.write(oprot_); oprot_->writeMessageEnd(); @@ -75035,7 +77603,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_grant_revoke_privileges(const return cseqid; } -void 
ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -75064,7 +77632,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevo iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("grant_revoke_privileges") != 0) { + if (fname.compare("get_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -75073,7 +77641,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevo using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_grant_revoke_privileges_presult result; + ThriftHiveMetastore_get_token_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -75084,12 +77652,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevo sentry.commit(); return; } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_privileges failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_token failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -75099,21 +77663,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevo } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::set_ugi(std::vector & _return, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreConcurrentClient::get_all_token_identifiers(std::vector & _return) { - int32_t seqid = send_set_ugi(user_name, group_names); - recv_set_ugi(_return, seqid); + int32_t seqid = send_get_all_token_identifiers(); + recv_get_all_token_identifiers(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_set_ugi(const std::string& user_name, const std::vector & group_names) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_token_identifiers() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("set_ugi", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_token_identifiers", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_set_ugi_pargs args; - args.user_name = &user_name; - args.group_names = &group_names; + ThriftHiveMetastore_get_all_token_identifiers_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -75124,7 +77686,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_set_ugi(const std::string& use return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -75153,7 +77715,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("set_ugi") != 0) { + if (fname.compare("get_all_token_identifiers") != 0) { 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -75162,7 +77724,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_set_ugi_presult result; + ThriftHiveMetastore_get_all_token_identifiers_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -75173,12 +77735,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector sentry.commit(); return; } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_ugi failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_token_identifiers failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -75188,21 +77746,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_delegation_token(std::string& _return, const std::string& token_owner, const std::string& renewer_kerberos_principal_name) +int32_t ThriftHiveMetastoreConcurrentClient::add_master_key(const std::string& key) { - int32_t seqid = send_get_delegation_token(token_owner, renewer_kerberos_principal_name); - recv_get_delegation_token(_return, seqid); + int32_t seqid = send_add_master_key(key); + return recv_add_master_key(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_delegation_token(const std::string& token_owner, const std::string& renewer_kerberos_principal_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_master_key(const std::string& key) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_master_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_delegation_token_pargs args; - args.token_owner = &token_owner; - args.renewer_kerberos_principal_name = &renewer_kerberos_principal_name; + ThriftHiveMetastore_add_master_key_pargs args; + args.key = &key; args.write(oprot_); oprot_->writeMessageEnd(); @@ -75213,7 +77770,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_delegation_token(const std return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& _return, const int32_t seqid) +int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t seqid) { int32_t rseqid = 0; @@ -75242,7 +77799,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_delegation_token") != 0) { + if (fname.compare("add_master_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -75251,23 +77808,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_delegation_token_presult result; + int32_t _return; + 
ThriftHiveMetastore_add_master_key_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_delegation_token failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_master_key failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -75277,20 +77834,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& } // end while(true) } -int64_t ThriftHiveMetastoreConcurrentClient::renew_delegation_token(const std::string& token_str_form) +void ThriftHiveMetastoreConcurrentClient::update_master_key(const int32_t seq_number, const std::string& key) { - int32_t seqid = send_renew_delegation_token(token_str_form); - return recv_renew_delegation_token(seqid); + int32_t seqid = send_update_master_key(seq_number, key); + recv_update_master_key(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_renew_delegation_token(const std::string& token_str_form) +int32_t ThriftHiveMetastoreConcurrentClient::send_update_master_key(const int32_t seq_number, const std::string& key) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("renew_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("update_master_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_renew_delegation_token_pargs args; - args.token_str_form = &token_str_form; + ThriftHiveMetastore_update_master_key_pargs args; + args.seq_number = &seq_number; + args.key = &key; args.write(oprot_); oprot_->writeMessageEnd(); @@ -75301,7 +77859,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_renew_delegation_token(const s return cseqid; } -int64_t ThriftHiveMetastoreConcurrentClient::recv_renew_delegation_token(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t seqid) { int32_t rseqid = 0; @@ -75330,7 +77888,7 @@ int64_t ThriftHiveMetastoreConcurrentClient::recv_renew_delegation_token(const i iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("renew_delegation_token") != 0) { + if (fname.compare("update_master_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -75339,23 +77897,21 @@ int64_t ThriftHiveMetastoreConcurrentClient::recv_renew_delegation_token(const i using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - int64_t _return; - ThriftHiveMetastore_renew_delegation_token_presult result; - result.success = &_return; + ThriftHiveMetastore_update_master_key_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - sentry.commit(); - return _return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "renew_delegation_token 
failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -75365,20 +77921,20 @@ int64_t ThriftHiveMetastoreConcurrentClient::recv_renew_delegation_token(const i } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::cancel_delegation_token(const std::string& token_str_form) +bool ThriftHiveMetastoreConcurrentClient::remove_master_key(const int32_t key_seq) { - int32_t seqid = send_cancel_delegation_token(token_str_form); - recv_cancel_delegation_token(seqid); + int32_t seqid = send_remove_master_key(key_seq); + return recv_remove_master_key(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_cancel_delegation_token(const std::string& token_str_form) +int32_t ThriftHiveMetastoreConcurrentClient::send_remove_master_key(const int32_t key_seq) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("cancel_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("remove_master_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_cancel_delegation_token_pargs args; - args.token_str_form = &token_str_form; + ThriftHiveMetastore_remove_master_key_pargs args; + args.key_seq = &key_seq; args.write(oprot_); oprot_->writeMessageEnd(); @@ -75389,7 +77945,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_cancel_delegation_token(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_cancel_delegation_token(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t seqid) { int32_t rseqid = 0; @@ -75418,7 +77974,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cancel_delegation_token(const int iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("cancel_delegation_token") != 0) { + if (fname.compare("remove_master_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -75427,17 +77983,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_cancel_delegation_token(const int using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_cancel_delegation_token_presult result; + bool _return; + ThriftHiveMetastore_remove_master_key_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { sentry.commit(); - throw result.o1; + return _return; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_master_key failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -75447,21 +78005,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_cancel_delegation_token(const int } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::add_token(const std::string& token_identifier, const std::string& delegation_token) +void ThriftHiveMetastoreConcurrentClient::get_master_keys(std::vector & _return) { - int32_t seqid = send_add_token(token_identifier, delegation_token); - return recv_add_token(seqid); + int32_t seqid = send_get_master_keys(); + recv_get_master_keys(_return, seqid); } 
-int32_t ThriftHiveMetastoreConcurrentClient::send_add_token(const std::string& token_identifier, const std::string& delegation_token) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_master_keys() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_master_keys", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_token_pargs args; - args.token_identifier = &token_identifier; - args.delegation_token = &delegation_token; + ThriftHiveMetastore_get_master_keys_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -75472,7 +78028,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_token(const std::string& t return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_add_token(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -75501,7 +78057,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_add_token(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_token") != 0) { + if (fname.compare("get_master_keys") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -75510,19 +78066,19 @@ bool ThriftHiveMetastoreConcurrentClient::recv_add_token(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_add_token_presult result; + ThriftHiveMetastore_get_master_keys_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_token failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_master_keys failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -75532,20 +78088,19 @@ bool ThriftHiveMetastoreConcurrentClient::recv_add_token(const int32_t seqid) } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::remove_token(const std::string& token_identifier) +void ThriftHiveMetastoreConcurrentClient::get_open_txns(GetOpenTxnsResponse& _return) { - int32_t seqid = send_remove_token(token_identifier); - return recv_remove_token(seqid); + int32_t seqid = send_get_open_txns(); + recv_get_open_txns(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_remove_token(const std::string& token_identifier) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("remove_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_open_txns", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_remove_token_pargs args; - args.token_identifier = &token_identifier; + ThriftHiveMetastore_get_open_txns_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -75556,7 +78111,7 @@ int32_t 
ThriftHiveMetastoreConcurrentClient::send_remove_token(const std::string return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -75585,7 +78140,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("remove_token") != 0) { + if (fname.compare("get_open_txns") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -75594,19 +78149,19 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_remove_token_presult result; + ThriftHiveMetastore_get_open_txns_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_token failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -75616,20 +78171,19 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_token(std::string& _return, const std::string& token_identifier) +void ThriftHiveMetastoreConcurrentClient::get_open_txns_info(GetOpenTxnsInfoResponse& _return) { - int32_t seqid = send_get_token(token_identifier); - recv_get_token(_return, seqid); + int32_t seqid = send_get_open_txns_info(); + recv_get_open_txns_info(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_token(const std::string& token_identifier) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns_info() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_open_txns_info", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_token_pargs args; - args.token_identifier = &token_identifier; + ThriftHiveMetastore_get_open_txns_info_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -75640,7 +78194,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_token(const std::string& t return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInfoResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -75669,7 +78223,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, c iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_token") != 0) { + if (fname.compare("get_open_txns_info") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ 
-75678,7 +78232,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, c using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_token_presult result; + ThriftHiveMetastore_get_open_txns_info_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -75690,7 +78244,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, c return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_token failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns_info failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -75700,19 +78254,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, c } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_all_token_identifiers(std::vector & _return) +void ThriftHiveMetastoreConcurrentClient::open_txns(OpenTxnsResponse& _return, const OpenTxnRequest& rqst) { - int32_t seqid = send_get_all_token_identifiers(); - recv_get_all_token_identifiers(_return, seqid); + int32_t seqid = send_open_txns(rqst); + recv_open_txns(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_token_identifiers() +int32_t ThriftHiveMetastoreConcurrentClient::send_open_txns(const OpenTxnRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_all_token_identifiers", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("open_txns", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_token_identifiers_pargs args; + ThriftHiveMetastore_open_txns_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -75723,7 +78278,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_token_identifiers() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -75752,7 +78307,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::ve iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_token_identifiers") != 0) { + if (fname.compare("open_txns") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -75761,7 +78316,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::ve using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_all_token_identifiers_presult result; + ThriftHiveMetastore_open_txns_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -75773,7 +78328,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::ve return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_token_identifiers failed: unknown result"); + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "open_txns failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -75783,20 +78338,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::ve } // end while(true) } -int32_t ThriftHiveMetastoreConcurrentClient::add_master_key(const std::string& key) +void ThriftHiveMetastoreConcurrentClient::abort_txn(const AbortTxnRequest& rqst) { - int32_t seqid = send_add_master_key(key); - return recv_add_master_key(seqid); + int32_t seqid = send_abort_txn(rqst); + recv_abort_txn(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_master_key(const std::string& key) +int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txn(const AbortTxnRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("abort_txn", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_master_key_pargs args; - args.key = &key; + ThriftHiveMetastore_abort_txn_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -75807,7 +78362,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_master_key(const std::stri return cseqid; } -int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) { int32_t rseqid = 0; @@ -75836,7 +78391,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_master_key") != 0) { + if (fname.compare("abort_txn") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -75845,23 +78400,17 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t s using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - int32_t _return; - ThriftHiveMetastore_add_master_key_presult result; - result.success = &_return; + ThriftHiveMetastore_abort_txn_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - sentry.commit(); - return _return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_master_key failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -75871,21 +78420,20 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t s } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::update_master_key(const int32_t seq_number, const std::string& key) +void ThriftHiveMetastoreConcurrentClient::abort_txns(const AbortTxnsRequest& rqst) { - int32_t seqid = send_update_master_key(seq_number, key); - recv_update_master_key(seqid); + int32_t seqid = send_abort_txns(rqst); + recv_abort_txns(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_update_master_key(const int32_t seq_number, const std::string& key) +int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txns(const AbortTxnsRequest& rqst) { 
int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("update_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("abort_txns", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_update_master_key_pargs args; - args.seq_number = &seq_number; - args.key = &key; + ThriftHiveMetastore_abort_txns_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -75896,7 +78444,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_update_master_key(const int32_ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) { int32_t rseqid = 0; @@ -75925,7 +78473,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("update_master_key") != 0) { + if (fname.compare("abort_txns") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -75934,7 +78482,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t s using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_update_master_key_presult result; + ThriftHiveMetastore_abort_txns_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -75943,10 +78491,6 @@ void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t s sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } sentry.commit(); return; } @@ -75958,20 +78502,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t s } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::remove_master_key(const int32_t key_seq) +void ThriftHiveMetastoreConcurrentClient::commit_txn(const CommitTxnRequest& rqst) { - int32_t seqid = send_remove_master_key(key_seq); - return recv_remove_master_key(seqid); + int32_t seqid = send_commit_txn(rqst); + recv_commit_txn(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_remove_master_key(const int32_t key_seq) +int32_t ThriftHiveMetastoreConcurrentClient::send_commit_txn(const CommitTxnRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("remove_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("commit_txn", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_remove_master_key_pargs args; - args.key_seq = &key_seq; + ThriftHiveMetastore_commit_txn_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -75982,7 +78526,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_remove_master_key(const int32_ return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) { int32_t rseqid = 0; @@ -76011,7 +78555,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("remove_master_key") != 0) { + if (fname.compare("commit_txn") != 0) { 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -76020,19 +78564,21 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t s using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_remove_master_key_presult result; - result.success = &_return; + ThriftHiveMetastore_commit_txn_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { + if (result.__isset.o1) { sentry.commit(); - return _return; + throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_master_key failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -76042,19 +78588,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t s } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_master_keys(std::vector & _return) +void ThriftHiveMetastoreConcurrentClient::lock(LockResponse& _return, const LockRequest& rqst) { - int32_t seqid = send_get_master_keys(); - recv_get_master_keys(_return, seqid); + int32_t seqid = send_lock(rqst); + recv_lock(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_master_keys() +int32_t ThriftHiveMetastoreConcurrentClient::send_lock(const LockRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_master_keys", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("lock", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_master_keys_pargs args; + ThriftHiveMetastore_lock_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -76065,7 +78612,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_master_keys() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -76094,7 +78641,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_master_keys") != 0) { + if (fname.compare("lock") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -76103,7 +78650,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vectorreadMessageEnd(); @@ -76114,8 +78661,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -76125,19 +78680,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_open_txns", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("check_lock", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_open_txns_pargs args; + ThriftHiveMetastore_check_lock_pargs args; + args.rqst = &rqst; args.write(oprot_); 
oprot_->writeMessageEnd(); @@ -76148,7 +78704,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -76177,7 +78733,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_open_txns") != 0) { + if (fname.compare("check_lock") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -76186,7 +78742,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_open_txns_presult result; + ThriftHiveMetastore_check_lock_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -76197,91 +78753,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse sentry.commit(); return; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns failed: unknown result"); - } - // seqid != rseqid - this->sync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -void ThriftHiveMetastoreConcurrentClient::get_open_txns_info(GetOpenTxnsInfoResponse& _return) -{ - int32_t seqid = send_get_open_txns_info(); - recv_get_open_txns_info(_return, seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns_info() -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_open_txns_info", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_get_open_txns_info_pargs args; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInfoResponse& _return, const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + if (result.__isset.o1) { sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + throw result.o1; } - if (fname.compare("get_open_txns_info") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - 
iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; } - ThriftHiveMetastore_get_open_txns_info_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o3) { sentry.commit(); - return; + throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns_info failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "check_lock failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -76291,19 +78776,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInf } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::open_txns(OpenTxnsResponse& _return, const OpenTxnRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::unlock(const UnlockRequest& rqst) { - int32_t seqid = send_open_txns(rqst); - recv_open_txns(_return, seqid); + int32_t seqid = send_unlock(rqst); + recv_unlock(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_open_txns(const OpenTxnRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_unlock(const UnlockRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("open_txns", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("unlock", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_open_txns_pargs args; + ThriftHiveMetastore_unlock_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -76315,7 +78800,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_open_txns(const OpenTxnRequest return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) { int32_t rseqid = 0; @@ -76344,7 +78829,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _retu iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("open_txns") != 0) { + if (fname.compare("unlock") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -76353,19 +78838,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _retu using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_open_txns_presult result; - result.success = &_return; + ThriftHiveMetastore_unlock_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o1) { sentry.commit(); - return; + throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "open_txns failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // 
seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -76375,19 +78862,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _retu } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::abort_txn(const AbortTxnRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::show_locks(ShowLocksResponse& _return, const ShowLocksRequest& rqst) { - int32_t seqid = send_abort_txn(rqst); - recv_abort_txn(seqid); + int32_t seqid = send_show_locks(rqst); + recv_show_locks(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txn(const AbortTxnRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_show_locks(const ShowLocksRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("abort_txn", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("show_locks", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_abort_txn_pargs args; + ThriftHiveMetastore_show_locks_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -76399,7 +78886,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txn(const AbortTxnReques return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -76428,7 +78915,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("abort_txn") != 0) { + if (fname.compare("show_locks") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -76437,17 +78924,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_abort_txn_presult result; + ThriftHiveMetastore_show_locks_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_locks failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -76457,20 +78946,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::abort_txns(const AbortTxnsRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::heartbeat(const HeartbeatRequest& ids) { - int32_t seqid = send_abort_txns(rqst); - recv_abort_txns(seqid); + int32_t seqid = send_heartbeat(ids); + recv_heartbeat(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txns(const AbortTxnsRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat(const HeartbeatRequest& ids) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("abort_txns", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("heartbeat", 
::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_abort_txns_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_heartbeat_pargs args; + args.ids = &ids; args.write(oprot_); oprot_->writeMessageEnd(); @@ -76481,7 +78970,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txns(const AbortTxnsRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) { int32_t rseqid = 0; @@ -76510,7 +78999,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("abort_txns") != 0) { + if (fname.compare("heartbeat") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -76519,7 +79008,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_abort_txns_presult result; + ThriftHiveMetastore_heartbeat_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -76528,6 +79017,14 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) sentry.commit(); throw result.o1; } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } sentry.commit(); return; } @@ -76539,20 +79036,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::commit_txn(const CommitTxnRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const HeartbeatTxnRangeRequest& txns) { - int32_t seqid = send_commit_txn(rqst); - recv_commit_txn(seqid); + int32_t seqid = send_heartbeat_txn_range(txns); + recv_heartbeat_txn_range(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_commit_txn(const CommitTxnRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat_txn_range(const HeartbeatTxnRangeRequest& txns) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("commit_txn", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_commit_txn_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_heartbeat_txn_range_pargs args; + args.txns = &txns; args.write(oprot_); oprot_->writeMessageEnd(); @@ -76563,7 +79060,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_commit_txn(const CommitTxnRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -76592,7 +79089,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("commit_txn") != 0) { + if (fname.compare("heartbeat_txn_range") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -76601,19 +79098,95 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_commit_txn_presult result; + ThriftHiveMetastore_heartbeat_txn_range_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o2) { + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "heartbeat_txn_range failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::compact(const CompactionRequest& rqst) +{ + int32_t seqid = send_compact(rqst); + recv_compact(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_compact(const CompactionRequest& rqst) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("compact", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_compact_pargs args; + args.rqst = &rqst; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); sentry.commit(); - throw result.o2; + throw x; } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("compact") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_compact_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); return; } @@ -76625,19 +79198,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::lock(LockResponse& _return, const LockRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::compact2(CompactionResponse& _return, const CompactionRequest& rqst) { - int32_t seqid = send_lock(rqst); - recv_lock(_return, seqid); + int32_t seqid = send_compact2(rqst); + 
recv_compact2(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_lock(const LockRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_compact2(const CompactionRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("lock", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("compact2", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_lock_pargs args; + ThriftHiveMetastore_compact2_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -76649,7 +79222,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_lock(const LockRequest& rqst) return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_compact2(CompactionResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -76678,7 +79251,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("lock") != 0) { + if (fname.compare("compact2") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -76687,7 +79260,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_lock_presult result; + ThriftHiveMetastore_compact2_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -76698,16 +79271,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const sentry.commit(); return; } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "lock failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "compact2 failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -76717,19 +79282,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::check_lock(LockResponse& _return, const CheckLockRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::show_compact(ShowCompactResponse& _return, const ShowCompactRequest& rqst) { - int32_t seqid = send_check_lock(rqst); - recv_check_lock(_return, seqid); + int32_t seqid = send_show_compact(rqst); + recv_show_compact(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_check_lock(const CheckLockRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_show_compact(const ShowCompactRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("check_lock", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_check_lock_pargs args; + ThriftHiveMetastore_show_compact_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -76741,7 +79306,7 @@ int32_t 
ThriftHiveMetastoreConcurrentClient::send_check_lock(const CheckLockRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -76770,7 +79335,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("check_lock") != 0) { + if (fname.compare("show_compact") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -76779,7 +79344,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_check_lock_presult result; + ThriftHiveMetastore_show_compact_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -76790,20 +79355,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, sentry.commit(); return; } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "check_lock failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_compact failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -76813,19 +79366,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::unlock(const UnlockRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::add_dynamic_partitions(const AddDynamicPartitions& rqst) { - int32_t seqid = send_unlock(rqst); - recv_unlock(seqid); + int32_t seqid = send_add_dynamic_partitions(rqst); + recv_add_dynamic_partitions(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_unlock(const UnlockRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_dynamic_partitions(const AddDynamicPartitions& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("unlock", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_unlock_pargs args; + ThriftHiveMetastore_add_dynamic_partitions_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -76837,7 +79390,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_unlock(const UnlockRequest& rq return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int32_t seqid) { int32_t rseqid = 0; @@ -76866,7 +79419,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("unlock") != 0) { + if (fname.compare("add_dynamic_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); 
iprot_->getTransport()->readEnd(); @@ -76875,7 +79428,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_unlock_presult result; + ThriftHiveMetastore_add_dynamic_partitions_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -76899,19 +79452,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::show_locks(ShowLocksResponse& _return, const ShowLocksRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::get_next_notification(NotificationEventResponse& _return, const NotificationEventRequest& rqst) { - int32_t seqid = send_show_locks(rqst); - recv_show_locks(_return, seqid); + int32_t seqid = send_get_next_notification(rqst); + recv_get_next_notification(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_show_locks(const ShowLocksRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_next_notification(const NotificationEventRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("show_locks", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_show_locks_pargs args; + ThriftHiveMetastore_get_next_notification_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -76923,7 +79476,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_show_locks(const ShowLocksRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(NotificationEventResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -76952,7 +79505,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("show_locks") != 0) { + if (fname.compare("get_next_notification") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -76961,7 +79514,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_show_locks_presult result; + ThriftHiveMetastore_get_next_notification_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -76973,7 +79526,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_locks failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_next_notification failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -76983,20 +79536,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::heartbeat(const HeartbeatRequest& ids) +void 
ThriftHiveMetastoreConcurrentClient::get_current_notificationEventId(CurrentNotificationEventId& _return) { - int32_t seqid = send_heartbeat(ids); - recv_heartbeat(seqid); + int32_t seqid = send_get_current_notificationEventId(); + recv_get_current_notificationEventId(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat(const HeartbeatRequest& ids) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_current_notificationEventId() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_heartbeat_pargs args; - args.ids = &ids; + ThriftHiveMetastore_get_current_notificationEventId_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -77007,7 +79559,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat(const HeartbeatReque return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(CurrentNotificationEventId& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -77036,7 +79588,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("heartbeat") != 0) { + if (fname.compare("get_current_notificationEventId") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -77045,25 +79597,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_heartbeat_presult result; + ThriftHiveMetastore_get_current_notificationEventId_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o3; + return; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_current_notificationEventId failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -77073,20 +79619,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const HeartbeatTxnRangeRequest& txns) +void ThriftHiveMetastoreConcurrentClient::get_notification_events_count(NotificationEventsCountResponse& _return, const NotificationEventsCountRequest& rqst) { - int32_t seqid = send_heartbeat_txn_range(txns); - recv_heartbeat_txn_range(_return, seqid); + int32_t seqid = send_get_notification_events_count(rqst); + recv_get_notification_events_count(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat_txn_range(const HeartbeatTxnRangeRequest& txns) +int32_t 
ThriftHiveMetastoreConcurrentClient::send_get_notification_events_count(const NotificationEventsCountRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_notification_events_count", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_heartbeat_txn_range_pargs args; - args.txns = &txns; + ThriftHiveMetastore_get_notification_events_count_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -77097,7 +79643,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat_txn_range(const Hear return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_notification_events_count(NotificationEventsCountResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -77126,7 +79672,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnR iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("heartbeat_txn_range") != 0) { + if (fname.compare("get_notification_events_count") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -77135,7 +79681,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnR using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_heartbeat_txn_range_presult result; + ThriftHiveMetastore_get_notification_events_count_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -77147,85 +79693,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnR return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "heartbeat_txn_range failed: unknown result"); - } - // seqid != rseqid - this->sync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -void ThriftHiveMetastoreConcurrentClient::compact(const CompactionRequest& rqst) -{ - int32_t seqid = send_compact(rqst); - recv_compact(seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_compact(const CompactionRequest& rqst) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("compact", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_compact_pargs args; - args.rqst = &rqst; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - 
iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("compact") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); - } - ThriftHiveMetastore_compact_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - sentry.commit(); - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_notification_events_count failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -77235,19 +79703,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::compact2(CompactionResponse& _return, const CompactionRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::fire_listener_event(FireEventResponse& _return, const FireEventRequest& rqst) { - int32_t seqid = send_compact2(rqst); - recv_compact2(_return, seqid); + int32_t seqid = send_fire_listener_event(rqst); + recv_fire_listener_event(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_compact2(const CompactionRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_fire_listener_event(const FireEventRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("compact2", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_compact2_pargs args; + ThriftHiveMetastore_fire_listener_event_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -77259,7 +79727,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_compact2(const CompactionReque return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_compact2(CompactionResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -77288,7 +79756,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_compact2(CompactionResponse& _ret iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("compact2") != 0) { + if (fname.compare("fire_listener_event") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -77297,7 +79765,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_compact2(CompactionResponse& _ret using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_compact2_presult result; + ThriftHiveMetastore_fire_listener_event_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -77309,7 +79777,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_compact2(CompactionResponse& _ret return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "compact2 failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fire_listener_event failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -77319,20 +79787,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_compact2(CompactionResponse& _ret } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::show_compact(ShowCompactResponse& _return, const ShowCompactRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::flushCache() { - int32_t seqid = send_show_compact(rqst); - recv_show_compact(_return, seqid); + int32_t seqid = send_flushCache(); + recv_flushCache(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_show_compact(const ShowCompactRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_flushCache() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_show_compact_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_flushCache_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -77343,7 +79810,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_show_compact(const ShowCompact return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) { int32_t rseqid = 0; @@ -77372,7 +79839,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("show_compact") != 0) { + if (fname.compare("flushCache") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -77381,19 +79848,13 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_show_compact_presult result; - result.success = &_return; + ThriftHiveMetastore_flushCache_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_compact failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -77403,20 +79864,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_dynamic_partitions(const AddDynamicPartitions& rqst) +void ThriftHiveMetastoreConcurrentClient::cm_recycle(CmRecycleResponse& _return, const CmRecycleRequest& request) { - int32_t seqid = send_add_dynamic_partitions(rqst); - recv_add_dynamic_partitions(seqid); + int32_t seqid = send_cm_recycle(request); + 
recv_cm_recycle(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_dynamic_partitions(const AddDynamicPartitions& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_cm_recycle(const CmRecycleRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("cm_recycle", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_dynamic_partitions_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_cm_recycle_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -77427,7 +79888,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_dynamic_partitions(const A return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_cm_recycle(CmRecycleResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -77456,7 +79917,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int3 iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_dynamic_partitions") != 0) { + if (fname.compare("cm_recycle") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -77465,21 +79926,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int3 using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_dynamic_partitions_presult result; + ThriftHiveMetastore_cm_recycle_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o2) { + if (result.__isset.o1) { sentry.commit(); - throw result.o2; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cm_recycle failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -77489,20 +79952,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int3 } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_next_notification(NotificationEventResponse& _return, const NotificationEventRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) { - int32_t seqid = send_get_next_notification(rqst); - recv_get_next_notification(_return, seqid); + int32_t seqid = send_get_file_metadata_by_expr(req); + recv_get_file_metadata_by_expr(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_next_notification(const NotificationEventRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata_by_expr(const GetFileMetadataByExprRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_CALL, cseqid); + 
oprot_->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_next_notification_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_get_file_metadata_by_expr_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -77513,7 +79976,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_next_notification(const No return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(NotificationEventResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -77542,7 +80005,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_next_notification") != 0) { + if (fname.compare("get_file_metadata_by_expr") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -77551,7 +80014,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_next_notification_presult result; + ThriftHiveMetastore_get_file_metadata_by_expr_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -77563,7 +80026,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_next_notification failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata_by_expr failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -77573,19 +80036,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_current_notificationEventId(CurrentNotificationEventId& _return) +void ThriftHiveMetastoreConcurrentClient::get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) { - int32_t seqid = send_get_current_notificationEventId(); - recv_get_current_notificationEventId(_return, seqid); + int32_t seqid = send_get_file_metadata(req); + recv_get_file_metadata(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_current_notificationEventId() +int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata(const GetFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_current_notificationEventId_pargs args; + ThriftHiveMetastore_get_file_metadata_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -77596,7 +80060,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_current_notificationEventI return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(CurrentNotificationEventId& _return, 
const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -77625,7 +80089,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_current_notificationEventId") != 0) { + if (fname.compare("get_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -77634,7 +80098,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_current_notificationEventId_presult result; + ThriftHiveMetastore_get_file_metadata_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -77646,7 +80110,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_current_notificationEventId failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -77656,20 +80120,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_notification_events_count(NotificationEventsCountResponse& _return, const NotificationEventsCountRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) { - int32_t seqid = send_get_notification_events_count(rqst); - recv_get_notification_events_count(_return, seqid); + int32_t seqid = send_put_file_metadata(req); + recv_put_file_metadata(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_notification_events_count(const NotificationEventsCountRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_put_file_metadata(const PutFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_notification_events_count", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_notification_events_count_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_put_file_metadata_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -77680,7 +80144,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_notification_events_count( return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_notification_events_count(NotificationEventsCountResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -77709,7 +80173,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_notification_events_count(Not iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_notification_events_count") != 0) { + if (fname.compare("put_file_metadata") 
!= 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -77718,7 +80182,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_notification_events_count(Not using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_notification_events_count_presult result; + ThriftHiveMetastore_put_file_metadata_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -77730,7 +80194,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_notification_events_count(Not return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_notification_events_count failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "put_file_metadata failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -77740,20 +80204,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_notification_events_count(Not } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::fire_listener_event(FireEventResponse& _return, const FireEventRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) { - int32_t seqid = send_fire_listener_event(rqst); - recv_fire_listener_event(_return, seqid); + int32_t seqid = send_clear_file_metadata(req); + recv_clear_file_metadata(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_fire_listener_event(const FireEventRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_clear_file_metadata(const ClearFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_fire_listener_event_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_clear_file_metadata_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -77764,7 +80228,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_fire_listener_event(const Fire return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -77793,7 +80257,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("fire_listener_event") != 0) { + if (fname.compare("clear_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -77802,7 +80266,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_fire_listener_event_presult result; + ThriftHiveMetastore_clear_file_metadata_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -77814,7 
+80278,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fire_listener_event failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "clear_file_metadata failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -77824,19 +80288,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::flushCache() +void ThriftHiveMetastoreConcurrentClient::cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) { - int32_t seqid = send_flushCache(); - recv_flushCache(seqid); + int32_t seqid = send_cache_file_metadata(req); + recv_cache_file_metadata(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_flushCache() +int32_t ThriftHiveMetastoreConcurrentClient::send_cache_file_metadata(const CacheFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_flushCache_pargs args; + ThriftHiveMetastore_cache_file_metadata_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -77847,7 +80312,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_flushCache() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -77876,7 +80341,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("flushCache") != 0) { + if (fname.compare("cache_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -77885,13 +80350,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_flushCache_presult result; + ThriftHiveMetastore_cache_file_metadata_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - sentry.commit(); - return; + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cache_file_metadata failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -77901,20 +80372,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::cm_recycle(CmRecycleResponse& _return, const CmRecycleRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_metastore_db_uuid(std::string& _return) { - int32_t seqid = send_cm_recycle(request); - 
recv_cm_recycle(_return, seqid); + int32_t seqid = send_get_metastore_db_uuid(); + recv_get_metastore_db_uuid(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_cm_recycle(const CmRecycleRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_metastore_db_uuid() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("cm_recycle", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_metastore_db_uuid", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_cm_recycle_pargs args; - args.request = &request; + ThriftHiveMetastore_get_metastore_db_uuid_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -77925,7 +80395,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_cm_recycle(const CmRecycleRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_cm_recycle(CmRecycleResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_metastore_db_uuid(std::string& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -77954,7 +80424,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cm_recycle(CmRecycleResponse& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("cm_recycle") != 0) { + if (fname.compare("get_metastore_db_uuid") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -77963,7 +80433,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cm_recycle(CmRecycleResponse& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_cm_recycle_presult result; + ThriftHiveMetastore_get_metastore_db_uuid_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -77979,7 +80449,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cm_recycle(CmRecycleResponse& _re throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cm_recycle failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_metastore_db_uuid failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -77989,20 +80459,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_cm_recycle(CmRecycleResponse& _re } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) +void ThriftHiveMetastoreConcurrentClient::create_resource_plan(WMCreateResourcePlanResponse& _return, const WMCreateResourcePlanRequest& request) { - int32_t seqid = send_get_file_metadata_by_expr(req); - recv_get_file_metadata_by_expr(_return, seqid); + int32_t seqid = send_create_resource_plan(request); + recv_create_resource_plan(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata_by_expr(const GetFileMetadataByExprRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_resource_plan(const WMCreateResourcePlanRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); + 
oprot_->writeMessageBegin("create_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_file_metadata_by_expr_pargs args; - args.req = &req; + ThriftHiveMetastore_create_resource_plan_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -78013,7 +80483,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata_by_expr(cons return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_create_resource_plan(WMCreateResourcePlanResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -78042,7 +80512,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFile iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_file_metadata_by_expr") != 0) { + if (fname.compare("create_resource_plan") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -78051,7 +80521,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFile using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_file_metadata_by_expr_presult result; + ThriftHiveMetastore_create_resource_plan_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -78062,92 +80532,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFile sentry.commit(); return; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata_by_expr failed: unknown result"); - } - // seqid != rseqid - this->sync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -void ThriftHiveMetastoreConcurrentClient::get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) -{ - int32_t seqid = send_get_file_metadata(req); - recv_get_file_metadata(_return, seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata(const GetFileMetadataRequest& req) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_get_file_metadata_pargs args; - args.req = &req; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadataResult& _return, const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); 
- iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + if (result.__isset.o1) { sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + throw result.o1; } - if (fname.compare("get_file_metadata") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; } - ThriftHiveMetastore_get_file_metadata_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o3) { sentry.commit(); - return; + throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_resource_plan failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -78157,20 +80555,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadata } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_resource_plan(WMGetResourcePlanResponse& _return, const WMGetResourcePlanRequest& request) { - int32_t seqid = send_put_file_metadata(req); - recv_put_file_metadata(_return, seqid); + int32_t seqid = send_get_resource_plan(request); + recv_get_resource_plan(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_put_file_metadata(const PutFileMetadataRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_resource_plan(const WMGetResourcePlanRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_put_file_metadata_pargs args; - args.req = &req; + ThriftHiveMetastore_get_resource_plan_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -78181,7 +80579,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_put_file_metadata(const PutFil return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadataResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_resource_plan(WMGetResourcePlanResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -78210,7 +80608,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("put_file_metadata") != 0) { + if (fname.compare("get_resource_plan") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -78219,7 +80617,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_put_file_metadata_presult result; + ThriftHiveMetastore_get_resource_plan_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -78230,8 +80628,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata sentry.commit(); return; } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "put_file_metadata failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_resource_plan failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -78241,20 +80647,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_active_resource_plan(WMGetActiveResourcePlanResponse& _return, const WMGetActiveResourcePlanRequest& request) { - int32_t seqid = send_clear_file_metadata(req); - recv_clear_file_metadata(_return, seqid); + int32_t seqid = send_get_active_resource_plan(request); + recv_get_active_resource_plan(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_clear_file_metadata(const ClearFileMetadataRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_active_resource_plan(const WMGetActiveResourcePlanRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_active_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_clear_file_metadata_pargs args; - args.req = &req; + ThriftHiveMetastore_get_active_resource_plan_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -78265,7 +80671,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_clear_file_metadata(const Clea return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMetadataResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_active_resource_plan(WMGetActiveResourcePlanResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -78294,7 +80700,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMeta iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("clear_file_metadata") != 0) { + if (fname.compare("get_active_resource_plan") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -78303,7 +80709,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMeta using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_clear_file_metadata_presult result; + ThriftHiveMetastore_get_active_resource_plan_presult 
result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -78314,8 +80720,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMeta sentry.commit(); return; } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "clear_file_metadata failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_active_resource_plan failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -78325,20 +80735,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMeta } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_all_resource_plans(WMGetAllResourcePlanResponse& _return, const WMGetAllResourcePlanRequest& request) { - int32_t seqid = send_cache_file_metadata(req); - recv_cache_file_metadata(_return, seqid); + int32_t seqid = send_get_all_resource_plans(request); + recv_get_all_resource_plans(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_cache_file_metadata(const CacheFileMetadataRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_resource_plans(const WMGetAllResourcePlanRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_resource_plans", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_cache_file_metadata_pargs args; - args.req = &req; + ThriftHiveMetastore_get_all_resource_plans_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -78349,7 +80759,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_cache_file_metadata(const Cach return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMetadataResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_all_resource_plans(WMGetAllResourcePlanResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -78378,7 +80788,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMeta iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("cache_file_metadata") != 0) { + if (fname.compare("get_all_resource_plans") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -78387,7 +80797,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMeta using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_cache_file_metadata_presult result; + ThriftHiveMetastore_get_all_resource_plans_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -78398,8 +80808,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMeta sentry.commit(); return; } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } // in a bad state, don't commit - throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cache_file_metadata failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_resource_plans failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -78409,19 +80823,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMeta } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_metastore_db_uuid(std::string& _return) +void ThriftHiveMetastoreConcurrentClient::alter_resource_plan(WMAlterResourcePlanResponse& _return, const WMAlterResourcePlanRequest& request) { - int32_t seqid = send_get_metastore_db_uuid(); - recv_get_metastore_db_uuid(_return, seqid); + int32_t seqid = send_alter_resource_plan(request); + recv_alter_resource_plan(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_metastore_db_uuid() +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_resource_plan(const WMAlterResourcePlanRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_metastore_db_uuid", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_metastore_db_uuid_pargs args; + ThriftHiveMetastore_alter_resource_plan_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -78432,7 +80847,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_metastore_db_uuid() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_metastore_db_uuid(std::string& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_resource_plan(WMAlterResourcePlanResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -78461,7 +80876,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_metastore_db_uuid(std::string iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_metastore_db_uuid") != 0) { + if (fname.compare("alter_resource_plan") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -78470,7 +80885,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_metastore_db_uuid(std::string using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_metastore_db_uuid_presult result; + ThriftHiveMetastore_alter_resource_plan_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -78485,8 +80900,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_metastore_db_uuid(std::string sentry.commit(); throw result.o1; } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_metastore_db_uuid failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "alter_resource_plan failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -78496,19 +80919,19 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_metastore_db_uuid(std::string } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::create_resource_plan(WMCreateResourcePlanResponse& _return, const WMCreateResourcePlanRequest& request) +void ThriftHiveMetastoreConcurrentClient::validate_resource_plan(WMValidateResourcePlanResponse& _return, const WMValidateResourcePlanRequest& request) { - int32_t seqid = send_create_resource_plan(request); - recv_create_resource_plan(_return, seqid); + int32_t seqid = send_validate_resource_plan(request); + recv_validate_resource_plan(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_resource_plan(const WMCreateResourcePlanRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_validate_resource_plan(const WMValidateResourcePlanRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("validate_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_resource_plan_pargs args; + ThriftHiveMetastore_validate_resource_plan_pargs args; args.request = &request; args.write(oprot_); @@ -78520,7 +80943,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_resource_plan(const WMC return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_create_resource_plan(WMCreateResourcePlanResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_validate_resource_plan(WMValidateResourcePlanResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -78549,7 +80972,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_resource_plan(WMCreateReso iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_resource_plan") != 0) { + if (fname.compare("validate_resource_plan") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -78558,7 +80981,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_resource_plan(WMCreateReso using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_create_resource_plan_presult result; + ThriftHiveMetastore_validate_resource_plan_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -78577,12 +81000,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_resource_plan(WMCreateReso sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_resource_plan failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "validate_resource_plan failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -78592,19 +81011,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_resource_plan(WMCreateReso } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_resource_plan(WMGetResourcePlanResponse& _return, const WMGetResourcePlanRequest& request) +void ThriftHiveMetastoreConcurrentClient::drop_resource_plan(WMDropResourcePlanResponse& _return, const WMDropResourcePlanRequest& request) { - 
int32_t seqid = send_get_resource_plan(request); - recv_get_resource_plan(_return, seqid); + int32_t seqid = send_drop_resource_plan(request); + recv_drop_resource_plan(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_resource_plan(const WMGetResourcePlanRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_resource_plan(const WMDropResourcePlanRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_resource_plan_pargs args; + ThriftHiveMetastore_drop_resource_plan_pargs args; args.request = &request; args.write(oprot_); @@ -78616,7 +81035,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_resource_plan(const WMGetR return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_resource_plan(WMGetResourcePlanResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_drop_resource_plan(WMDropResourcePlanResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -78645,7 +81064,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_resource_plan(WMGetResourcePl iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_resource_plan") != 0) { + if (fname.compare("drop_resource_plan") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -78654,7 +81073,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_resource_plan(WMGetResourcePl using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_resource_plan_presult result; + ThriftHiveMetastore_drop_resource_plan_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -78673,8 +81092,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_resource_plan(WMGetResourcePl sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_resource_plan failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_resource_plan failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -78684,19 +81107,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_resource_plan(WMGetResourcePl } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_active_resource_plan(WMGetActiveResourcePlanResponse& _return, const WMGetActiveResourcePlanRequest& request) +void ThriftHiveMetastoreConcurrentClient::create_wm_trigger(WMCreateTriggerResponse& _return, const WMCreateTriggerRequest& request) { - int32_t seqid = send_get_active_resource_plan(request); - recv_get_active_resource_plan(_return, seqid); + int32_t seqid = send_create_wm_trigger(request); + recv_create_wm_trigger(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_active_resource_plan(const WMGetActiveResourcePlanRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_wm_trigger(const WMCreateTriggerRequest& request) { int32_t cseqid = 
this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_active_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_wm_trigger", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_active_resource_plan_pargs args; + ThriftHiveMetastore_create_wm_trigger_pargs args; args.request = &request; args.write(oprot_); @@ -78708,7 +81131,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_active_resource_plan(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_active_resource_plan(WMGetActiveResourcePlanResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_create_wm_trigger(WMCreateTriggerResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -78737,7 +81160,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_active_resource_plan(WMGetAct iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_active_resource_plan") != 0) { + if (fname.compare("create_wm_trigger") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -78746,7 +81169,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_active_resource_plan(WMGetAct using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_active_resource_plan_presult result; + ThriftHiveMetastore_create_wm_trigger_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -78757,12 +81180,24 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_active_resource_plan(WMGetAct sentry.commit(); return; } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } if (result.__isset.o2) { sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_active_resource_plan failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_wm_trigger failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -78772,19 +81207,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_active_resource_plan(WMGetAct } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_all_resource_plans(WMGetAllResourcePlanResponse& _return, const WMGetAllResourcePlanRequest& request) +void ThriftHiveMetastoreConcurrentClient::alter_wm_trigger(WMAlterTriggerResponse& _return, const WMAlterTriggerRequest& request) { - int32_t seqid = send_get_all_resource_plans(request); - recv_get_all_resource_plans(_return, seqid); + int32_t seqid = send_alter_wm_trigger(request); + recv_alter_wm_trigger(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_resource_plans(const WMGetAllResourcePlanRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_wm_trigger(const WMAlterTriggerRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_all_resource_plans", ::apache::thrift::protocol::T_CALL, cseqid); + 
oprot_->writeMessageBegin("alter_wm_trigger", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_resource_plans_pargs args; + ThriftHiveMetastore_alter_wm_trigger_pargs args; args.request = &request; args.write(oprot_); @@ -78796,7 +81231,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_resource_plans(const W return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_all_resource_plans(WMGetAllResourcePlanResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_wm_trigger(WMAlterTriggerResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -78825,7 +81260,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_resource_plans(WMGetAllRe iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_resource_plans") != 0) { + if (fname.compare("alter_wm_trigger") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -78834,7 +81269,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_resource_plans(WMGetAllRe using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_all_resource_plans_presult result; + ThriftHiveMetastore_alter_wm_trigger_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -78849,8 +81284,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_resource_plans(WMGetAllRe sentry.commit(); throw result.o1; } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_resource_plans failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "alter_wm_trigger failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -78860,19 +81303,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_resource_plans(WMGetAllRe } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_resource_plan(WMAlterResourcePlanResponse& _return, const WMAlterResourcePlanRequest& request) +void ThriftHiveMetastoreConcurrentClient::drop_wm_trigger(WMDropTriggerResponse& _return, const WMDropTriggerRequest& request) { - int32_t seqid = send_alter_resource_plan(request); - recv_alter_resource_plan(_return, seqid); + int32_t seqid = send_drop_wm_trigger(request); + recv_drop_wm_trigger(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_resource_plan(const WMAlterResourcePlanRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_wm_trigger(const WMDropTriggerRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_wm_trigger", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_resource_plan_pargs args; + ThriftHiveMetastore_drop_wm_trigger_pargs args; args.request = &request; args.write(oprot_); @@ -78884,7 +81327,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_resource_plan(const WMAl return cseqid; } -void 
ThriftHiveMetastoreConcurrentClient::recv_alter_resource_plan(WMAlterResourcePlanResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_drop_wm_trigger(WMDropTriggerResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -78913,7 +81356,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_resource_plan(WMAlterResour iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_resource_plan") != 0) { + if (fname.compare("drop_wm_trigger") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -78922,7 +81365,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_resource_plan(WMAlterResour using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_resource_plan_presult result; + ThriftHiveMetastore_drop_wm_trigger_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -78946,7 +81389,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_resource_plan(WMAlterResour throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "alter_resource_plan failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_wm_trigger failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -78956,19 +81399,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_resource_plan(WMAlterResour } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::validate_resource_plan(WMValidateResourcePlanResponse& _return, const WMValidateResourcePlanRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const WMGetTriggersForResourePlanRequest& request) { - int32_t seqid = send_validate_resource_plan(request); - recv_validate_resource_plan(_return, seqid); + int32_t seqid = send_get_triggers_for_resourceplan(request); + recv_get_triggers_for_resourceplan(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_validate_resource_plan(const WMValidateResourcePlanRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_triggers_for_resourceplan(const WMGetTriggersForResourePlanRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("validate_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_triggers_for_resourceplan", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_validate_resource_plan_pargs args; + ThriftHiveMetastore_get_triggers_for_resourceplan_pargs args; args.request = &request; args.write(oprot_); @@ -78980,7 +81423,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_validate_resource_plan(const W return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_validate_resource_plan(WMValidateResourcePlanResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -79009,7 +81452,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_validate_resource_plan(WMValidate iprot_->readMessageEnd(); 
iprot_->getTransport()->readEnd(); } - if (fname.compare("validate_resource_plan") != 0) { + if (fname.compare("get_triggers_for_resourceplan") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -79018,7 +81461,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_validate_resource_plan(WMValidate using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_validate_resource_plan_presult result; + ThriftHiveMetastore_get_triggers_for_resourceplan_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -79038,7 +81481,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_validate_resource_plan(WMValidate throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "validate_resource_plan failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_triggers_for_resourceplan failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -79048,19 +81491,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_validate_resource_plan(WMValidate } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_resource_plan(WMDropResourcePlanResponse& _return, const WMDropResourcePlanRequest& request) +void ThriftHiveMetastoreConcurrentClient::create_or_update_wm_pool(WMCreateOrUpdatePoolResponse& _return, const WMCreateOrUpdatePoolRequest& request) { - int32_t seqid = send_drop_resource_plan(request); - recv_drop_resource_plan(_return, seqid); + int32_t seqid = send_create_or_update_wm_pool(request); + recv_create_or_update_wm_pool(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_resource_plan(const WMDropResourcePlanRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_or_update_wm_pool(const WMCreateOrUpdatePoolRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_or_update_wm_pool", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_resource_plan_pargs args; + ThriftHiveMetastore_create_or_update_wm_pool_pargs args; args.request = &request; args.write(oprot_); @@ -79072,7 +81515,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_resource_plan(const WMDro return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_resource_plan(WMDropResourcePlanResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_create_or_update_wm_pool(WMCreateOrUpdatePoolResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -79101,7 +81544,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_resource_plan(WMDropResource iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_resource_plan") != 0) { + if (fname.compare("create_or_update_wm_pool") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -79110,7 +81553,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_resource_plan(WMDropResource using ::apache::thrift::protocol::TProtocolException; throw 
TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_resource_plan_presult result; + ThriftHiveMetastore_create_or_update_wm_pool_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -79133,8 +81576,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_resource_plan(WMDropResource sentry.commit(); throw result.o3; } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_resource_plan failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_or_update_wm_pool failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -79144,19 +81591,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_resource_plan(WMDropResource } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::create_wm_trigger(WMCreateTriggerResponse& _return, const WMCreateTriggerRequest& request) +void ThriftHiveMetastoreConcurrentClient::drop_wm_pool(WMDropPoolResponse& _return, const WMDropPoolRequest& request) { - int32_t seqid = send_create_wm_trigger(request); - recv_create_wm_trigger(_return, seqid); + int32_t seqid = send_drop_wm_pool(request); + recv_drop_wm_pool(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_wm_trigger(const WMCreateTriggerRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_wm_pool(const WMDropPoolRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_wm_trigger", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_wm_pool", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_wm_trigger_pargs args; + ThriftHiveMetastore_drop_wm_pool_pargs args; args.request = &request; args.write(oprot_); @@ -79168,7 +81615,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_wm_trigger(const WMCrea return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_create_wm_trigger(WMCreateTriggerResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_drop_wm_pool(WMDropPoolResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -79197,7 +81644,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_wm_trigger(WMCreateTrigger iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_wm_trigger") != 0) { + if (fname.compare("drop_wm_pool") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -79206,7 +81653,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_wm_trigger(WMCreateTrigger using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_create_wm_trigger_presult result; + ThriftHiveMetastore_drop_wm_pool_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -79229,12 +81676,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_wm_trigger(WMCreateTrigger sentry.commit(); throw result.o3; } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } // in a bad state, don't commit - throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_wm_trigger failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_wm_pool failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -79244,19 +81687,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_wm_trigger(WMCreateTrigger } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_wm_trigger(WMAlterTriggerResponse& _return, const WMAlterTriggerRequest& request) +void ThriftHiveMetastoreConcurrentClient::create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const WMCreateOrUpdateMappingRequest& request) { - int32_t seqid = send_alter_wm_trigger(request); - recv_alter_wm_trigger(_return, seqid); + int32_t seqid = send_create_or_update_wm_mapping(request); + recv_create_or_update_wm_mapping(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_wm_trigger(const WMAlterTriggerRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_or_update_wm_mapping(const WMCreateOrUpdateMappingRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_wm_trigger", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_or_update_wm_mapping", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_wm_trigger_pargs args; + ThriftHiveMetastore_create_or_update_wm_mapping_pargs args; args.request = &request; args.write(oprot_); @@ -79268,7 +81711,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_wm_trigger(const WMAlter return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_wm_trigger(WMAlterTriggerResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -79297,7 +81740,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_wm_trigger(WMAlterTriggerRe iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_wm_trigger") != 0) { + if (fname.compare("create_or_update_wm_mapping") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -79306,7 +81749,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_wm_trigger(WMAlterTriggerRe using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_wm_trigger_presult result; + ThriftHiveMetastore_create_or_update_wm_mapping_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -79329,8 +81772,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_wm_trigger(WMAlterTriggerRe sentry.commit(); throw result.o3; } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "alter_wm_trigger failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_or_update_wm_mapping failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -79340,19 +81787,19 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_alter_wm_trigger(WMAlterTriggerRe } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_wm_trigger(WMDropTriggerResponse& _return, const WMDropTriggerRequest& request) +void ThriftHiveMetastoreConcurrentClient::drop_wm_mapping(WMDropMappingResponse& _return, const WMDropMappingRequest& request) { - int32_t seqid = send_drop_wm_trigger(request); - recv_drop_wm_trigger(_return, seqid); + int32_t seqid = send_drop_wm_mapping(request); + recv_drop_wm_mapping(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_wm_trigger(const WMDropTriggerRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_wm_mapping(const WMDropMappingRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_wm_trigger", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_wm_mapping", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_wm_trigger_pargs args; + ThriftHiveMetastore_drop_wm_mapping_pargs args; args.request = &request; args.write(oprot_); @@ -79364,7 +81811,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_wm_trigger(const WMDropTr return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_wm_trigger(WMDropTriggerResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_drop_wm_mapping(WMDropMappingResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -79393,7 +81840,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_wm_trigger(WMDropTriggerResp iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_wm_trigger") != 0) { + if (fname.compare("drop_wm_mapping") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -79402,7 +81849,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_wm_trigger(WMDropTriggerResp using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_wm_trigger_presult result; + ThriftHiveMetastore_drop_wm_mapping_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -79426,7 +81873,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_wm_trigger(WMDropTriggerResp throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_wm_trigger failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_wm_mapping failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -79436,19 +81883,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_wm_trigger(WMDropTriggerResp } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const WMGetTriggersForResourePlanRequest& request) +void ThriftHiveMetastoreConcurrentClient::create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const WMCreateOrDropTriggerToPoolMappingRequest& request) { - int32_t seqid = send_get_triggers_for_resourceplan(request); - recv_get_triggers_for_resourceplan(_return, seqid); + int32_t seqid = 
send_create_or_drop_wm_trigger_to_pool_mapping(request); + recv_create_or_drop_wm_trigger_to_pool_mapping(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_triggers_for_resourceplan(const WMGetTriggersForResourePlanRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_or_drop_wm_trigger_to_pool_mapping(const WMCreateOrDropTriggerToPoolMappingRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_triggers_for_resourceplan", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_or_drop_wm_trigger_to_pool_mapping", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_triggers_for_resourceplan_pargs args; + ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_pargs args; args.request = &request; args.write(oprot_); @@ -79460,7 +81907,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_triggers_for_resourceplan( return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -79489,7 +81936,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_triggers_for_resourceplan(WMG iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_triggers_for_resourceplan") != 0) { + if (fname.compare("create_or_drop_wm_trigger_to_pool_mapping") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -79498,7 +81945,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_triggers_for_resourceplan(WMG using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_triggers_for_resourceplan_presult result; + ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -79517,8 +81964,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_triggers_for_resourceplan(WMG sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_triggers_for_resourceplan failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_or_drop_wm_trigger_to_pool_mapping failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index 5c4284b094..75529fc7dd 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -195,6 +195,11 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void alter_wm_trigger(WMAlterTriggerResponse& _return, const WMAlterTriggerRequest& request) = 0; virtual void drop_wm_trigger(WMDropTriggerResponse& _return, 
const WMDropTriggerRequest& request) = 0; virtual void get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const WMGetTriggersForResourePlanRequest& request) = 0; + virtual void create_or_update_wm_pool(WMCreateOrUpdatePoolResponse& _return, const WMCreateOrUpdatePoolRequest& request) = 0; + virtual void drop_wm_pool(WMDropPoolResponse& _return, const WMDropPoolRequest& request) = 0; + virtual void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const WMCreateOrUpdateMappingRequest& request) = 0; + virtual void drop_wm_mapping(WMDropMappingResponse& _return, const WMDropMappingRequest& request) = 0; + virtual void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const WMCreateOrDropTriggerToPoolMappingRequest& request) = 0; }; class ThriftHiveMetastoreIfFactory : virtual public ::facebook::fb303::FacebookServiceIfFactory { @@ -771,6 +776,21 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& /* _return */, const WMGetTriggersForResourePlanRequest& /* request */) { return; } + void create_or_update_wm_pool(WMCreateOrUpdatePoolResponse& /* _return */, const WMCreateOrUpdatePoolRequest& /* request */) { + return; + } + void drop_wm_pool(WMDropPoolResponse& /* _return */, const WMDropPoolRequest& /* request */) { + return; + } + void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& /* _return */, const WMCreateOrUpdateMappingRequest& /* request */) { + return; + } + void drop_wm_mapping(WMDropMappingResponse& /* _return */, const WMDropMappingRequest& /* request */) { + return; + } + void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& /* _return */, const WMCreateOrDropTriggerToPoolMappingRequest& /* request */) { + return; + } }; typedef struct _ThriftHiveMetastore_getMetaConf_args__isset { @@ -22081,244 +22101,908 @@ class ThriftHiveMetastore_get_triggers_for_resourceplan_presult { }; -class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public ::facebook::fb303::FacebookServiceClient { +typedef struct _ThriftHiveMetastore_create_or_update_wm_pool_args__isset { + _ThriftHiveMetastore_create_or_update_wm_pool_args__isset() : request(false) {} + bool request :1; +} _ThriftHiveMetastore_create_or_update_wm_pool_args__isset; + +class ThriftHiveMetastore_create_or_update_wm_pool_args { public: - ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) : - ::facebook::fb303::FacebookServiceClient(prot, prot) {} - ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, boost::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) : ::facebook::fb303::FacebookServiceClient(iprot, oprot) {} - boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getInputProtocol() { - return piprot_; + + ThriftHiveMetastore_create_or_update_wm_pool_args(const ThriftHiveMetastore_create_or_update_wm_pool_args&); + ThriftHiveMetastore_create_or_update_wm_pool_args& operator=(const ThriftHiveMetastore_create_or_update_wm_pool_args&); + ThriftHiveMetastore_create_or_update_wm_pool_args() { } - boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getOutputProtocol() { - return poprot_; + + virtual ~ThriftHiveMetastore_create_or_update_wm_pool_args() throw(); + WMCreateOrUpdatePoolRequest request; + + _ThriftHiveMetastore_create_or_update_wm_pool_args__isset __isset; + + void 
__set_request(const WMCreateOrUpdatePoolRequest& val); + + bool operator == (const ThriftHiveMetastore_create_or_update_wm_pool_args & rhs) const + { + if (!(request == rhs.request)) + return false; + return true; } - void getMetaConf(std::string& _return, const std::string& key); - void send_getMetaConf(const std::string& key); - void recv_getMetaConf(std::string& _return); - void setMetaConf(const std::string& key, const std::string& value); - void send_setMetaConf(const std::string& key, const std::string& value); - void recv_setMetaConf(); - void create_database(const Database& database); - void send_create_database(const Database& database); - void recv_create_database(); - void get_database(Database& _return, const std::string& name); - void send_get_database(const std::string& name); - void recv_get_database(Database& _return); - void drop_database(const std::string& name, const bool deleteData, const bool cascade); - void send_drop_database(const std::string& name, const bool deleteData, const bool cascade); - void recv_drop_database(); - void get_databases(std::vector & _return, const std::string& pattern); - void send_get_databases(const std::string& pattern); - void recv_get_databases(std::vector & _return); - void get_all_databases(std::vector & _return); - void send_get_all_databases(); - void recv_get_all_databases(std::vector & _return); - void alter_database(const std::string& dbname, const Database& db); - void send_alter_database(const std::string& dbname, const Database& db); - void recv_alter_database(); - void get_type(Type& _return, const std::string& name); - void send_get_type(const std::string& name); - void recv_get_type(Type& _return); - bool create_type(const Type& type); - void send_create_type(const Type& type); - bool recv_create_type(); - bool drop_type(const std::string& type); - void send_drop_type(const std::string& type); - bool recv_drop_type(); - void get_type_all(std::map & _return, const std::string& name); - void send_get_type_all(const std::string& name); - void recv_get_type_all(std::map & _return); - void get_fields(std::vector & _return, const std::string& db_name, const std::string& table_name); - void send_get_fields(const std::string& db_name, const std::string& table_name); - void recv_get_fields(std::vector & _return); - void get_fields_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); - void send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); - void recv_get_fields_with_environment_context(std::vector & _return); - void get_schema(std::vector & _return, const std::string& db_name, const std::string& table_name); - void send_get_schema(const std::string& db_name, const std::string& table_name); - void recv_get_schema(std::vector & _return); - void get_schema_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); - void send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); - void recv_get_schema_with_environment_context(std::vector & _return); - void create_table(const Table& tbl); - void send_create_table(const Table& tbl); - void recv_create_table(); - void create_table_with_environment_context(const Table& tbl, const EnvironmentContext& 
environment_context); - void send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context); - void recv_create_table_with_environment_context(); - void create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints); - void send_create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints); - void recv_create_table_with_constraints(); - void drop_constraint(const DropConstraintRequest& req); - void send_drop_constraint(const DropConstraintRequest& req); - void recv_drop_constraint(); - void add_primary_key(const AddPrimaryKeyRequest& req); - void send_add_primary_key(const AddPrimaryKeyRequest& req); - void recv_add_primary_key(); - void add_foreign_key(const AddForeignKeyRequest& req); - void send_add_foreign_key(const AddForeignKeyRequest& req); - void recv_add_foreign_key(); - void add_unique_constraint(const AddUniqueConstraintRequest& req); - void send_add_unique_constraint(const AddUniqueConstraintRequest& req); - void recv_add_unique_constraint(); - void add_not_null_constraint(const AddNotNullConstraintRequest& req); - void send_add_not_null_constraint(const AddNotNullConstraintRequest& req); - void recv_add_not_null_constraint(); - void drop_table(const std::string& dbname, const std::string& name, const bool deleteData); - void send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData); - void recv_drop_table(); - void drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context); - void send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context); - void recv_drop_table_with_environment_context(); - void truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames); - void send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames); - void recv_truncate_table(); - void get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern); - void send_get_tables(const std::string& db_name, const std::string& pattern); - void recv_get_tables(std::vector & _return); - void get_tables_by_type(std::vector & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType); - void send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType); - void recv_get_tables_by_type(std::vector & _return); - void get_table_meta(std::vector & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types); - void send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types); - void recv_get_table_meta(std::vector & _return); - void get_all_tables(std::vector & _return, const std::string& db_name); - void send_get_all_tables(const std::string& db_name); - void recv_get_all_tables(std::vector & _return); - void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name); - void send_get_table(const std::string& dbname, const std::string& tbl_name); - void 
recv_get_table(Table& _return); - void get_table_objects_by_name(std::vector<Table> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names); - void send_get_table_objects_by_name(const std::string& dbname, const std::vector<std::string> & tbl_names); - void recv_get_table_objects_by_name(std::vector<Table>
& _return); - void get_table_req(GetTableResult& _return, const GetTableRequest& req); - void send_get_table_req(const GetTableRequest& req); - void recv_get_table_req(GetTableResult& _return); - void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req); - void send_get_table_objects_by_name_req(const GetTablesRequest& req); - void recv_get_table_objects_by_name_req(GetTablesResult& _return); - void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables); - void send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables); - void recv_get_table_names_by_filter(std::vector & _return); - void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl); - void send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl); - void recv_alter_table(); - void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context); - void send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context); - void recv_alter_table_with_environment_context(); - void alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade); - void send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade); - void recv_alter_table_with_cascade(); - void add_partition(Partition& _return, const Partition& new_part); - void send_add_partition(const Partition& new_part); - void recv_add_partition(Partition& _return); - void add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context); - void send_add_partition_with_environment_context(const Partition& new_part, const EnvironmentContext& environment_context); - void recv_add_partition_with_environment_context(Partition& _return); - int32_t add_partitions(const std::vector & new_parts); - void send_add_partitions(const std::vector & new_parts); - int32_t recv_add_partitions(); - int32_t add_partitions_pspec(const std::vector & new_parts); - void send_add_partitions_pspec(const std::vector & new_parts); - int32_t recv_add_partitions_pspec(); - void append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); - void send_append_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); - void recv_append_partition(Partition& _return); - void add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request); - void send_add_partitions_req(const AddPartitionsRequest& request); - void recv_add_partitions_req(AddPartitionsResult& _return); - void append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context); - void send_append_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context); - void recv_append_partition_with_environment_context(Partition& _return); - void append_partition_by_name(Partition& _return, 
const std::string& db_name, const std::string& tbl_name, const std::string& part_name); - void send_append_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name); - void recv_append_partition_by_name(Partition& _return); - void append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context); - void send_append_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context); - void recv_append_partition_by_name_with_environment_context(Partition& _return); - bool drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData); - void send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData); - bool recv_drop_partition(); - bool drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context); - void send_drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context); - bool recv_drop_partition_with_environment_context(); - bool drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData); - void send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData); - bool recv_drop_partition_by_name(); - bool drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context); - void send_drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context); - bool recv_drop_partition_by_name_with_environment_context(); - void drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req); - void send_drop_partitions_req(const DropPartitionsRequest& req); - void recv_drop_partitions_req(DropPartitionsResult& _return); - void get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); - void send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); - void recv_get_partition(Partition& _return); - void exchange_partition(Partition& _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); - void send_exchange_partition(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); - void recv_exchange_partition(Partition& _return); - void exchange_partitions(std::vector & _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); - 
void send_exchange_partitions(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); - void recv_exchange_partitions(std::vector & _return); - void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names); - void send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names); - void recv_get_partition_with_auth(Partition& _return); - void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name); - void send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name); - void recv_get_partition_by_name(Partition& _return); - void get_partitions(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); - void send_get_partitions(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); - void recv_get_partitions(std::vector & _return); - void get_partitions_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); - void send_get_partitions_with_auth(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); - void recv_get_partitions_with_auth(std::vector & _return); - void get_partitions_pspec(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts); - void send_get_partitions_pspec(const std::string& db_name, const std::string& tbl_name, const int32_t max_parts); - void recv_get_partitions_pspec(std::vector & _return); - void get_partition_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); - void send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); - void recv_get_partition_names(std::vector & _return); - void get_partition_values(PartitionValuesResponse& _return, const PartitionValuesRequest& request); - void send_get_partition_values(const PartitionValuesRequest& request); - void recv_get_partition_values(PartitionValuesResponse& _return); - void get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); - void send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); - void recv_get_partitions_ps(std::vector & _return); - void get_partitions_ps_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); - void send_get_partitions_ps_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); - void recv_get_partitions_ps_with_auth(std::vector & _return); - void get_partition_names_ps(std::vector & _return, const std::string& 
db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); - void send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); - void recv_get_partition_names_ps(std::vector & _return); - void get_partitions_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts); - void send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts); - void recv_get_partitions_by_filter(std::vector & _return); - void get_part_specs_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts); - void send_get_part_specs_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts); - void recv_get_part_specs_by_filter(std::vector & _return); - void get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req); - void send_get_partitions_by_expr(const PartitionsByExprRequest& req); - void recv_get_partitions_by_expr(PartitionsByExprResult& _return); - int32_t get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter); - void send_get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter); - int32_t recv_get_num_partitions_by_filter(); - void get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names); - void send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector & names); - void recv_get_partitions_by_names(std::vector & _return); - void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part); - void send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part); - void recv_alter_partition(); - void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts); - void send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts); - void recv_alter_partitions(); - void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context); - void send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context); - void recv_alter_partitions_with_environment_context(); - void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); - void send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); - void recv_alter_partition_with_environment_context(); - void rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part); - void send_rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& 
new_part); + bool operator != (const ThriftHiveMetastore_create_or_update_wm_pool_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_create_or_update_wm_pool_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_create_or_update_wm_pool_pargs { + public: + + + virtual ~ThriftHiveMetastore_create_or_update_wm_pool_pargs() throw(); + const WMCreateOrUpdatePoolRequest* request; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_create_or_update_wm_pool_result__isset { + _ThriftHiveMetastore_create_or_update_wm_pool_result__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; + bool o3 :1; + bool o4 :1; +} _ThriftHiveMetastore_create_or_update_wm_pool_result__isset; + +class ThriftHiveMetastore_create_or_update_wm_pool_result { + public: + + ThriftHiveMetastore_create_or_update_wm_pool_result(const ThriftHiveMetastore_create_or_update_wm_pool_result&); + ThriftHiveMetastore_create_or_update_wm_pool_result& operator=(const ThriftHiveMetastore_create_or_update_wm_pool_result&); + ThriftHiveMetastore_create_or_update_wm_pool_result() { + } + + virtual ~ThriftHiveMetastore_create_or_update_wm_pool_result() throw(); + WMCreateOrUpdatePoolResponse success; + AlreadyExistsException o1; + NoSuchObjectException o2; + InvalidObjectException o3; + MetaException o4; + + _ThriftHiveMetastore_create_or_update_wm_pool_result__isset __isset; + + void __set_success(const WMCreateOrUpdatePoolResponse& val); + + void __set_o1(const AlreadyExistsException& val); + + void __set_o2(const NoSuchObjectException& val); + + void __set_o3(const InvalidObjectException& val); + + void __set_o4(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_create_or_update_wm_pool_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + if (!(o4 == rhs.o4)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_create_or_update_wm_pool_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_create_or_update_wm_pool_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_create_or_update_wm_pool_presult__isset { + _ThriftHiveMetastore_create_or_update_wm_pool_presult__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; + bool o3 :1; + bool o4 :1; +} _ThriftHiveMetastore_create_or_update_wm_pool_presult__isset; + +class ThriftHiveMetastore_create_or_update_wm_pool_presult { + public: + + + virtual ~ThriftHiveMetastore_create_or_update_wm_pool_presult() throw(); + WMCreateOrUpdatePoolResponse* success; + AlreadyExistsException o1; + NoSuchObjectException o2; + InvalidObjectException o3; + MetaException o4; + + _ThriftHiveMetastore_create_or_update_wm_pool_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_drop_wm_pool_args__isset { + _ThriftHiveMetastore_drop_wm_pool_args__isset() : request(false) {} + bool request :1; +} 
_ThriftHiveMetastore_drop_wm_pool_args__isset; + +class ThriftHiveMetastore_drop_wm_pool_args { + public: + + ThriftHiveMetastore_drop_wm_pool_args(const ThriftHiveMetastore_drop_wm_pool_args&); + ThriftHiveMetastore_drop_wm_pool_args& operator=(const ThriftHiveMetastore_drop_wm_pool_args&); + ThriftHiveMetastore_drop_wm_pool_args() { + } + + virtual ~ThriftHiveMetastore_drop_wm_pool_args() throw(); + WMDropPoolRequest request; + + _ThriftHiveMetastore_drop_wm_pool_args__isset __isset; + + void __set_request(const WMDropPoolRequest& val); + + bool operator == (const ThriftHiveMetastore_drop_wm_pool_args & rhs) const + { + if (!(request == rhs.request)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_drop_wm_pool_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_drop_wm_pool_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_drop_wm_pool_pargs { + public: + + + virtual ~ThriftHiveMetastore_drop_wm_pool_pargs() throw(); + const WMDropPoolRequest* request; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_drop_wm_pool_result__isset { + _ThriftHiveMetastore_drop_wm_pool_result__isset() : success(false), o1(false), o2(false), o3(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_drop_wm_pool_result__isset; + +class ThriftHiveMetastore_drop_wm_pool_result { + public: + + ThriftHiveMetastore_drop_wm_pool_result(const ThriftHiveMetastore_drop_wm_pool_result&); + ThriftHiveMetastore_drop_wm_pool_result& operator=(const ThriftHiveMetastore_drop_wm_pool_result&); + ThriftHiveMetastore_drop_wm_pool_result() { + } + + virtual ~ThriftHiveMetastore_drop_wm_pool_result() throw(); + WMDropPoolResponse success; + NoSuchObjectException o1; + InvalidOperationException o2; + MetaException o3; + + _ThriftHiveMetastore_drop_wm_pool_result__isset __isset; + + void __set_success(const WMDropPoolResponse& val); + + void __set_o1(const NoSuchObjectException& val); + + void __set_o2(const InvalidOperationException& val); + + void __set_o3(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_drop_wm_pool_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_drop_wm_pool_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_drop_wm_pool_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_drop_wm_pool_presult__isset { + _ThriftHiveMetastore_drop_wm_pool_presult__isset() : success(false), o1(false), o2(false), o3(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_drop_wm_pool_presult__isset; + +class ThriftHiveMetastore_drop_wm_pool_presult { + public: + + + virtual ~ThriftHiveMetastore_drop_wm_pool_presult() throw(); + WMDropPoolResponse* success; + NoSuchObjectException o1; + InvalidOperationException o2; + MetaException o3; + + _ThriftHiveMetastore_drop_wm_pool_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); 
+ +}; + +typedef struct _ThriftHiveMetastore_create_or_update_wm_mapping_args__isset { + _ThriftHiveMetastore_create_or_update_wm_mapping_args__isset() : request(false) {} + bool request :1; +} _ThriftHiveMetastore_create_or_update_wm_mapping_args__isset; + +class ThriftHiveMetastore_create_or_update_wm_mapping_args { + public: + + ThriftHiveMetastore_create_or_update_wm_mapping_args(const ThriftHiveMetastore_create_or_update_wm_mapping_args&); + ThriftHiveMetastore_create_or_update_wm_mapping_args& operator=(const ThriftHiveMetastore_create_or_update_wm_mapping_args&); + ThriftHiveMetastore_create_or_update_wm_mapping_args() { + } + + virtual ~ThriftHiveMetastore_create_or_update_wm_mapping_args() throw(); + WMCreateOrUpdateMappingRequest request; + + _ThriftHiveMetastore_create_or_update_wm_mapping_args__isset __isset; + + void __set_request(const WMCreateOrUpdateMappingRequest& val); + + bool operator == (const ThriftHiveMetastore_create_or_update_wm_mapping_args & rhs) const + { + if (!(request == rhs.request)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_create_or_update_wm_mapping_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_create_or_update_wm_mapping_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_create_or_update_wm_mapping_pargs { + public: + + + virtual ~ThriftHiveMetastore_create_or_update_wm_mapping_pargs() throw(); + const WMCreateOrUpdateMappingRequest* request; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_create_or_update_wm_mapping_result__isset { + _ThriftHiveMetastore_create_or_update_wm_mapping_result__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; + bool o3 :1; + bool o4 :1; +} _ThriftHiveMetastore_create_or_update_wm_mapping_result__isset; + +class ThriftHiveMetastore_create_or_update_wm_mapping_result { + public: + + ThriftHiveMetastore_create_or_update_wm_mapping_result(const ThriftHiveMetastore_create_or_update_wm_mapping_result&); + ThriftHiveMetastore_create_or_update_wm_mapping_result& operator=(const ThriftHiveMetastore_create_or_update_wm_mapping_result&); + ThriftHiveMetastore_create_or_update_wm_mapping_result() { + } + + virtual ~ThriftHiveMetastore_create_or_update_wm_mapping_result() throw(); + WMCreateOrUpdateMappingResponse success; + AlreadyExistsException o1; + NoSuchObjectException o2; + InvalidObjectException o3; + MetaException o4; + + _ThriftHiveMetastore_create_or_update_wm_mapping_result__isset __isset; + + void __set_success(const WMCreateOrUpdateMappingResponse& val); + + void __set_o1(const AlreadyExistsException& val); + + void __set_o2(const NoSuchObjectException& val); + + void __set_o3(const InvalidObjectException& val); + + void __set_o4(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_create_or_update_wm_mapping_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + if (!(o4 == rhs.o4)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_create_or_update_wm_mapping_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const 
ThriftHiveMetastore_create_or_update_wm_mapping_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_create_or_update_wm_mapping_presult__isset { + _ThriftHiveMetastore_create_or_update_wm_mapping_presult__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; + bool o3 :1; + bool o4 :1; +} _ThriftHiveMetastore_create_or_update_wm_mapping_presult__isset; + +class ThriftHiveMetastore_create_or_update_wm_mapping_presult { + public: + + + virtual ~ThriftHiveMetastore_create_or_update_wm_mapping_presult() throw(); + WMCreateOrUpdateMappingResponse* success; + AlreadyExistsException o1; + NoSuchObjectException o2; + InvalidObjectException o3; + MetaException o4; + + _ThriftHiveMetastore_create_or_update_wm_mapping_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_drop_wm_mapping_args__isset { + _ThriftHiveMetastore_drop_wm_mapping_args__isset() : request(false) {} + bool request :1; +} _ThriftHiveMetastore_drop_wm_mapping_args__isset; + +class ThriftHiveMetastore_drop_wm_mapping_args { + public: + + ThriftHiveMetastore_drop_wm_mapping_args(const ThriftHiveMetastore_drop_wm_mapping_args&); + ThriftHiveMetastore_drop_wm_mapping_args& operator=(const ThriftHiveMetastore_drop_wm_mapping_args&); + ThriftHiveMetastore_drop_wm_mapping_args() { + } + + virtual ~ThriftHiveMetastore_drop_wm_mapping_args() throw(); + WMDropMappingRequest request; + + _ThriftHiveMetastore_drop_wm_mapping_args__isset __isset; + + void __set_request(const WMDropMappingRequest& val); + + bool operator == (const ThriftHiveMetastore_drop_wm_mapping_args & rhs) const + { + if (!(request == rhs.request)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_drop_wm_mapping_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_drop_wm_mapping_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_drop_wm_mapping_pargs { + public: + + + virtual ~ThriftHiveMetastore_drop_wm_mapping_pargs() throw(); + const WMDropMappingRequest* request; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_drop_wm_mapping_result__isset { + _ThriftHiveMetastore_drop_wm_mapping_result__isset() : success(false), o1(false), o2(false), o3(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_drop_wm_mapping_result__isset; + +class ThriftHiveMetastore_drop_wm_mapping_result { + public: + + ThriftHiveMetastore_drop_wm_mapping_result(const ThriftHiveMetastore_drop_wm_mapping_result&); + ThriftHiveMetastore_drop_wm_mapping_result& operator=(const ThriftHiveMetastore_drop_wm_mapping_result&); + ThriftHiveMetastore_drop_wm_mapping_result() { + } + + virtual ~ThriftHiveMetastore_drop_wm_mapping_result() throw(); + WMDropMappingResponse success; + NoSuchObjectException o1; + InvalidOperationException o2; + MetaException o3; + + _ThriftHiveMetastore_drop_wm_mapping_result__isset __isset; + + void __set_success(const WMDropMappingResponse& val); + + void __set_o1(const NoSuchObjectException& val); + + void __set_o2(const InvalidOperationException& val); + + void __set_o3(const 
MetaException& val); + + bool operator == (const ThriftHiveMetastore_drop_wm_mapping_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_drop_wm_mapping_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_drop_wm_mapping_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_drop_wm_mapping_presult__isset { + _ThriftHiveMetastore_drop_wm_mapping_presult__isset() : success(false), o1(false), o2(false), o3(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_drop_wm_mapping_presult__isset; + +class ThriftHiveMetastore_drop_wm_mapping_presult { + public: + + + virtual ~ThriftHiveMetastore_drop_wm_mapping_presult() throw(); + WMDropMappingResponse* success; + NoSuchObjectException o1; + InvalidOperationException o2; + MetaException o3; + + _ThriftHiveMetastore_drop_wm_mapping_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args__isset { + _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args__isset() : request(false) {} + bool request :1; +} _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args__isset; + +class ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args { + public: + + ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args(const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args&); + ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args& operator=(const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args&); + ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args() { + } + + virtual ~ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args() throw(); + WMCreateOrDropTriggerToPoolMappingRequest request; + + _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args__isset __isset; + + void __set_request(const WMCreateOrDropTriggerToPoolMappingRequest& val); + + bool operator == (const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args & rhs) const + { + if (!(request == rhs.request)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_pargs { + public: + + + virtual ~ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_pargs() throw(); + const WMCreateOrDropTriggerToPoolMappingRequest* request; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result__isset { + _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {} + bool success :1; + bool o1 :1; + bool o2 
:1; + bool o3 :1; + bool o4 :1; +} _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result__isset; + +class ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result { + public: + + ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result(const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result&); + ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result& operator=(const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result&); + ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result() { + } + + virtual ~ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result() throw(); + WMCreateOrDropTriggerToPoolMappingResponse success; + AlreadyExistsException o1; + NoSuchObjectException o2; + InvalidObjectException o3; + MetaException o4; + + _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result__isset __isset; + + void __set_success(const WMCreateOrDropTriggerToPoolMappingResponse& val); + + void __set_o1(const AlreadyExistsException& val); + + void __set_o2(const NoSuchObjectException& val); + + void __set_o3(const InvalidObjectException& val); + + void __set_o4(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + if (!(o4 == rhs.o4)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult__isset { + _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; + bool o3 :1; + bool o4 :1; +} _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult__isset; + +class ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult { + public: + + + virtual ~ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult() throw(); + WMCreateOrDropTriggerToPoolMappingResponse* success; + AlreadyExistsException o1; + NoSuchObjectException o2; + InvalidObjectException o3; + MetaException o4; + + _ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public ::facebook::fb303::FacebookServiceClient { + public: + ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) : + ::facebook::fb303::FacebookServiceClient(prot, prot) {} + ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, boost::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) : ::facebook::fb303::FacebookServiceClient(iprot, oprot) {} + boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getInputProtocol() { + return piprot_; + } + boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getOutputProtocol() { + 
return poprot_; + } + void getMetaConf(std::string& _return, const std::string& key); + void send_getMetaConf(const std::string& key); + void recv_getMetaConf(std::string& _return); + void setMetaConf(const std::string& key, const std::string& value); + void send_setMetaConf(const std::string& key, const std::string& value); + void recv_setMetaConf(); + void create_database(const Database& database); + void send_create_database(const Database& database); + void recv_create_database(); + void get_database(Database& _return, const std::string& name); + void send_get_database(const std::string& name); + void recv_get_database(Database& _return); + void drop_database(const std::string& name, const bool deleteData, const bool cascade); + void send_drop_database(const std::string& name, const bool deleteData, const bool cascade); + void recv_drop_database(); + void get_databases(std::vector & _return, const std::string& pattern); + void send_get_databases(const std::string& pattern); + void recv_get_databases(std::vector & _return); + void get_all_databases(std::vector & _return); + void send_get_all_databases(); + void recv_get_all_databases(std::vector & _return); + void alter_database(const std::string& dbname, const Database& db); + void send_alter_database(const std::string& dbname, const Database& db); + void recv_alter_database(); + void get_type(Type& _return, const std::string& name); + void send_get_type(const std::string& name); + void recv_get_type(Type& _return); + bool create_type(const Type& type); + void send_create_type(const Type& type); + bool recv_create_type(); + bool drop_type(const std::string& type); + void send_drop_type(const std::string& type); + bool recv_drop_type(); + void get_type_all(std::map & _return, const std::string& name); + void send_get_type_all(const std::string& name); + void recv_get_type_all(std::map & _return); + void get_fields(std::vector & _return, const std::string& db_name, const std::string& table_name); + void send_get_fields(const std::string& db_name, const std::string& table_name); + void recv_get_fields(std::vector & _return); + void get_fields_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); + void send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); + void recv_get_fields_with_environment_context(std::vector & _return); + void get_schema(std::vector & _return, const std::string& db_name, const std::string& table_name); + void send_get_schema(const std::string& db_name, const std::string& table_name); + void recv_get_schema(std::vector & _return); + void get_schema_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); + void send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); + void recv_get_schema_with_environment_context(std::vector & _return); + void create_table(const Table& tbl); + void send_create_table(const Table& tbl); + void recv_create_table(); + void create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context); + void send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context); + void recv_create_table_with_environment_context(); + void 
create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints); + void send_create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints); + void recv_create_table_with_constraints(); + void drop_constraint(const DropConstraintRequest& req); + void send_drop_constraint(const DropConstraintRequest& req); + void recv_drop_constraint(); + void add_primary_key(const AddPrimaryKeyRequest& req); + void send_add_primary_key(const AddPrimaryKeyRequest& req); + void recv_add_primary_key(); + void add_foreign_key(const AddForeignKeyRequest& req); + void send_add_foreign_key(const AddForeignKeyRequest& req); + void recv_add_foreign_key(); + void add_unique_constraint(const AddUniqueConstraintRequest& req); + void send_add_unique_constraint(const AddUniqueConstraintRequest& req); + void recv_add_unique_constraint(); + void add_not_null_constraint(const AddNotNullConstraintRequest& req); + void send_add_not_null_constraint(const AddNotNullConstraintRequest& req); + void recv_add_not_null_constraint(); + void drop_table(const std::string& dbname, const std::string& name, const bool deleteData); + void send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData); + void recv_drop_table(); + void drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context); + void send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context); + void recv_drop_table_with_environment_context(); + void truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames); + void send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames); + void recv_truncate_table(); + void get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern); + void send_get_tables(const std::string& db_name, const std::string& pattern); + void recv_get_tables(std::vector & _return); + void get_tables_by_type(std::vector & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType); + void send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType); + void recv_get_tables_by_type(std::vector & _return); + void get_table_meta(std::vector & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types); + void send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types); + void recv_get_table_meta(std::vector & _return); + void get_all_tables(std::vector & _return, const std::string& db_name); + void send_get_all_tables(const std::string& db_name); + void recv_get_all_tables(std::vector & _return); + void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name); + void send_get_table(const std::string& dbname, const std::string& tbl_name); + void recv_get_table(Table& _return); + void get_table_objects_by_name(std::vector
<Table> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names); + void send_get_table_objects_by_name(const std::string& dbname, const std::vector<std::string> & tbl_names); + void recv_get_table_objects_by_name(std::vector<Table>
& _return); + void get_table_req(GetTableResult& _return, const GetTableRequest& req); + void send_get_table_req(const GetTableRequest& req); + void recv_get_table_req(GetTableResult& _return); + void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req); + void send_get_table_objects_by_name_req(const GetTablesRequest& req); + void recv_get_table_objects_by_name_req(GetTablesResult& _return); + void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables); + void send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables); + void recv_get_table_names_by_filter(std::vector & _return); + void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl); + void send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl); + void recv_alter_table(); + void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context); + void send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context); + void recv_alter_table_with_environment_context(); + void alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade); + void send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade); + void recv_alter_table_with_cascade(); + void add_partition(Partition& _return, const Partition& new_part); + void send_add_partition(const Partition& new_part); + void recv_add_partition(Partition& _return); + void add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context); + void send_add_partition_with_environment_context(const Partition& new_part, const EnvironmentContext& environment_context); + void recv_add_partition_with_environment_context(Partition& _return); + int32_t add_partitions(const std::vector & new_parts); + void send_add_partitions(const std::vector & new_parts); + int32_t recv_add_partitions(); + int32_t add_partitions_pspec(const std::vector & new_parts); + void send_add_partitions_pspec(const std::vector & new_parts); + int32_t recv_add_partitions_pspec(); + void append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); + void send_append_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); + void recv_append_partition(Partition& _return); + void add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request); + void send_add_partitions_req(const AddPartitionsRequest& request); + void recv_add_partitions_req(AddPartitionsResult& _return); + void append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context); + void send_append_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context); + void recv_append_partition_with_environment_context(Partition& _return); + void append_partition_by_name(Partition& _return, 
const std::string& db_name, const std::string& tbl_name, const std::string& part_name); + void send_append_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name); + void recv_append_partition_by_name(Partition& _return); + void append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context); + void send_append_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context); + void recv_append_partition_by_name_with_environment_context(Partition& _return); + bool drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData); + void send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData); + bool recv_drop_partition(); + bool drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context); + void send_drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context); + bool recv_drop_partition_with_environment_context(); + bool drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData); + void send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData); + bool recv_drop_partition_by_name(); + bool drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context); + void send_drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context); + bool recv_drop_partition_by_name_with_environment_context(); + void drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req); + void send_drop_partitions_req(const DropPartitionsRequest& req); + void recv_drop_partitions_req(DropPartitionsResult& _return); + void get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); + void send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); + void recv_get_partition(Partition& _return); + void exchange_partition(Partition& _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); + void send_exchange_partition(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); + void recv_exchange_partition(Partition& _return); + void exchange_partitions(std::vector & _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); + 
void send_exchange_partitions(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); + void recv_exchange_partitions(std::vector & _return); + void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names); + void send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names); + void recv_get_partition_with_auth(Partition& _return); + void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name); + void send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name); + void recv_get_partition_by_name(Partition& _return); + void get_partitions(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); + void send_get_partitions(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); + void recv_get_partitions(std::vector & _return); + void get_partitions_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); + void send_get_partitions_with_auth(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); + void recv_get_partitions_with_auth(std::vector & _return); + void get_partitions_pspec(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts); + void send_get_partitions_pspec(const std::string& db_name, const std::string& tbl_name, const int32_t max_parts); + void recv_get_partitions_pspec(std::vector & _return); + void get_partition_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); + void send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); + void recv_get_partition_names(std::vector & _return); + void get_partition_values(PartitionValuesResponse& _return, const PartitionValuesRequest& request); + void send_get_partition_values(const PartitionValuesRequest& request); + void recv_get_partition_values(PartitionValuesResponse& _return); + void get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); + void send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); + void recv_get_partitions_ps(std::vector & _return); + void get_partitions_ps_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); + void send_get_partitions_ps_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); + void recv_get_partitions_ps_with_auth(std::vector & _return); + void get_partition_names_ps(std::vector & _return, const std::string& 
db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); + void send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); + void recv_get_partition_names_ps(std::vector & _return); + void get_partitions_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts); + void send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts); + void recv_get_partitions_by_filter(std::vector & _return); + void get_part_specs_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts); + void send_get_part_specs_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts); + void recv_get_part_specs_by_filter(std::vector & _return); + void get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req); + void send_get_partitions_by_expr(const PartitionsByExprRequest& req); + void recv_get_partitions_by_expr(PartitionsByExprResult& _return); + int32_t get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter); + void send_get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter); + int32_t recv_get_num_partitions_by_filter(); + void get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names); + void send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector & names); + void recv_get_partitions_by_names(std::vector & _return); + void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part); + void send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part); + void recv_alter_partition(); + void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts); + void send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts); + void recv_alter_partitions(); + void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context); + void send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context); + void recv_alter_partitions_with_environment_context(); + void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); + void send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); + void recv_alter_partition_with_environment_context(); + void rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part); + void send_rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& 
new_part); void recv_rename_partition(); bool partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception); void send_partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception); @@ -22611,6 +23295,21 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const WMGetTriggersForResourePlanRequest& request); void send_get_triggers_for_resourceplan(const WMGetTriggersForResourePlanRequest& request); void recv_get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return); + void create_or_update_wm_pool(WMCreateOrUpdatePoolResponse& _return, const WMCreateOrUpdatePoolRequest& request); + void send_create_or_update_wm_pool(const WMCreateOrUpdatePoolRequest& request); + void recv_create_or_update_wm_pool(WMCreateOrUpdatePoolResponse& _return); + void drop_wm_pool(WMDropPoolResponse& _return, const WMDropPoolRequest& request); + void send_drop_wm_pool(const WMDropPoolRequest& request); + void recv_drop_wm_pool(WMDropPoolResponse& _return); + void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const WMCreateOrUpdateMappingRequest& request); + void send_create_or_update_wm_mapping(const WMCreateOrUpdateMappingRequest& request); + void recv_create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return); + void drop_wm_mapping(WMDropMappingResponse& _return, const WMDropMappingRequest& request); + void send_drop_wm_mapping(const WMDropMappingRequest& request); + void recv_drop_wm_mapping(WMDropMappingResponse& _return); + void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const WMCreateOrDropTriggerToPoolMappingRequest& request); + void send_create_or_drop_wm_trigger_to_pool_mapping(const WMCreateOrDropTriggerToPoolMappingRequest& request); + void recv_create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return); }; class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceProcessor { @@ -22794,6 +23493,11 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP void process_alter_wm_trigger(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_drop_wm_trigger(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_triggers_for_resourceplan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_create_or_update_wm_pool(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_drop_wm_pool(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_create_or_update_wm_mapping(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_drop_wm_mapping(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_create_or_drop_wm_trigger_to_pool_mapping(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); 
public: ThriftHiveMetastoreProcessor(boost::shared_ptr iface) : ::facebook::fb303::FacebookServiceProcessor(iface), @@ -22971,6 +23675,11 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP processMap_["alter_wm_trigger"] = &ThriftHiveMetastoreProcessor::process_alter_wm_trigger; processMap_["drop_wm_trigger"] = &ThriftHiveMetastoreProcessor::process_drop_wm_trigger; processMap_["get_triggers_for_resourceplan"] = &ThriftHiveMetastoreProcessor::process_get_triggers_for_resourceplan; + processMap_["create_or_update_wm_pool"] = &ThriftHiveMetastoreProcessor::process_create_or_update_wm_pool; + processMap_["drop_wm_pool"] = &ThriftHiveMetastoreProcessor::process_drop_wm_pool; + processMap_["create_or_update_wm_mapping"] = &ThriftHiveMetastoreProcessor::process_create_or_update_wm_mapping; + processMap_["drop_wm_mapping"] = &ThriftHiveMetastoreProcessor::process_drop_wm_mapping; + processMap_["create_or_drop_wm_trigger_to_pool_mapping"] = &ThriftHiveMetastoreProcessor::process_create_or_drop_wm_trigger_to_pool_mapping; } virtual ~ThriftHiveMetastoreProcessor() {} @@ -24668,6 +25377,56 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi return; } + void create_or_update_wm_pool(WMCreateOrUpdatePoolResponse& _return, const WMCreateOrUpdatePoolRequest& request) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->create_or_update_wm_pool(_return, request); + } + ifaces_[i]->create_or_update_wm_pool(_return, request); + return; + } + + void drop_wm_pool(WMDropPoolResponse& _return, const WMDropPoolRequest& request) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->drop_wm_pool(_return, request); + } + ifaces_[i]->drop_wm_pool(_return, request); + return; + } + + void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const WMCreateOrUpdateMappingRequest& request) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->create_or_update_wm_mapping(_return, request); + } + ifaces_[i]->create_or_update_wm_mapping(_return, request); + return; + } + + void drop_wm_mapping(WMDropMappingResponse& _return, const WMDropMappingRequest& request) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->drop_wm_mapping(_return, request); + } + ifaces_[i]->drop_wm_mapping(_return, request); + return; + } + + void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const WMCreateOrDropTriggerToPoolMappingRequest& request) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->create_or_drop_wm_trigger_to_pool_mapping(_return, request); + } + ifaces_[i]->create_or_drop_wm_trigger_to_pool_mapping(_return, request); + return; + } + }; // The 'concurrent' client is a thread safe client that correctly handles @@ -25203,6 +25962,21 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf void get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const WMGetTriggersForResourePlanRequest& request); int32_t send_get_triggers_for_resourceplan(const WMGetTriggersForResourePlanRequest& request); void recv_get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const int32_t seqid); + void create_or_update_wm_pool(WMCreateOrUpdatePoolResponse& _return, const WMCreateOrUpdatePoolRequest& request); + int32_t send_create_or_update_wm_pool(const 
WMCreateOrUpdatePoolRequest& request); + void recv_create_or_update_wm_pool(WMCreateOrUpdatePoolResponse& _return, const int32_t seqid); + void drop_wm_pool(WMDropPoolResponse& _return, const WMDropPoolRequest& request); + int32_t send_drop_wm_pool(const WMDropPoolRequest& request); + void recv_drop_wm_pool(WMDropPoolResponse& _return, const int32_t seqid); + void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const WMCreateOrUpdateMappingRequest& request); + int32_t send_create_or_update_wm_mapping(const WMCreateOrUpdateMappingRequest& request); + void recv_create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const int32_t seqid); + void drop_wm_mapping(WMDropMappingResponse& _return, const WMDropMappingRequest& request); + int32_t send_drop_wm_mapping(const WMDropMappingRequest& request); + void recv_drop_wm_mapping(WMDropMappingResponse& _return, const int32_t seqid); + void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const WMCreateOrDropTriggerToPoolMappingRequest& request); + int32_t send_create_or_drop_wm_trigger_to_pool_mapping(const WMCreateOrDropTriggerToPoolMappingRequest& request); + void recv_create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const int32_t seqid); }; #ifdef _WIN32 diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index 2aa3954d87..81650cccd5 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -887,6 +887,31 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("get_triggers_for_resourceplan\n"); } + void create_or_update_wm_pool(WMCreateOrUpdatePoolResponse& _return, const WMCreateOrUpdatePoolRequest& request) { + // Your implementation goes here + printf("create_or_update_wm_pool\n"); + } + + void drop_wm_pool(WMDropPoolResponse& _return, const WMDropPoolRequest& request) { + // Your implementation goes here + printf("drop_wm_pool\n"); + } + + void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const WMCreateOrUpdateMappingRequest& request) { + // Your implementation goes here + printf("create_or_update_wm_mapping\n"); + } + + void drop_wm_mapping(WMDropMappingResponse& _return, const WMDropMappingRequest& request) { + // Your implementation goes here + printf("drop_wm_mapping\n"); + } + + void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const WMCreateOrDropTriggerToPoolMappingRequest& request) { + // Your implementation goes here + printf("create_or_drop_wm_trigger_to_pool_mapping\n"); + } + }; int main(int argc, char **argv) { diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index c0ad739b7b..71b1f34860 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -21281,9 +21281,9 @@ void WMMapping::__set_entityName(const std::string& val) { this->entityName = val; } -void WMMapping::__set_poolName(const std::string& val) { - this->poolName = val; -__isset.poolName = true; +void WMMapping::__set_poolPath(const std::string& val) { + this->poolPath = val; +__isset.poolPath = 
true; } void WMMapping::__set_ordering(const int32_t val) { @@ -21341,8 +21341,8 @@ uint32_t WMMapping::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 4: if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->poolName); - this->__isset.poolName = true; + xfer += iprot->readString(this->poolPath); + this->__isset.poolPath = true; } else { xfer += iprot->skip(ftype); } @@ -21390,9 +21390,9 @@ uint32_t WMMapping::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeString(this->entityName); xfer += oprot->writeFieldEnd(); - if (this->__isset.poolName) { - xfer += oprot->writeFieldBegin("poolName", ::apache::thrift::protocol::T_STRING, 4); - xfer += oprot->writeString(this->poolName); + if (this->__isset.poolPath) { + xfer += oprot->writeFieldBegin("poolPath", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->poolPath); xfer += oprot->writeFieldEnd(); } if (this->__isset.ordering) { @@ -21410,7 +21410,7 @@ void swap(WMMapping &a, WMMapping &b) { swap(a.resourcePlanName, b.resourcePlanName); swap(a.entityType, b.entityType); swap(a.entityName, b.entityName); - swap(a.poolName, b.poolName); + swap(a.poolPath, b.poolPath); swap(a.ordering, b.ordering); swap(a.__isset, b.__isset); } @@ -21419,7 +21419,7 @@ WMMapping::WMMapping(const WMMapping& other870) { resourcePlanName = other870.resourcePlanName; entityType = other870.entityType; entityName = other870.entityName; - poolName = other870.poolName; + poolPath = other870.poolPath; ordering = other870.ordering; __isset = other870.__isset; } @@ -21427,7 +21427,7 @@ WMMapping& WMMapping::operator=(const WMMapping& other871) { resourcePlanName = other871.resourcePlanName; entityType = other871.entityType; entityName = other871.entityName; - poolName = other871.poolName; + poolPath = other871.poolPath; ordering = other871.ordering; __isset = other871.__isset; return *this; @@ -21438,7 +21438,7 @@ void WMMapping::printTo(std::ostream& out) const { out << "resourcePlanName=" << to_string(resourcePlanName); out << ", " << "entityType=" << to_string(entityType); out << ", " << "entityName=" << to_string(entityName); - out << ", " << "poolName="; (__isset.poolName ? (out << to_string(poolName)) : (out << "")); + out << ", " << "poolPath="; (__isset.poolPath ? (out << to_string(poolPath)) : (out << "")); out << ", " << "ordering="; (__isset.ordering ? 
(out << to_string(ordering)) : (out << "")); out << ")"; } @@ -23671,15 +23671,26 @@ void WMGetTriggersForResourePlanResponse::printTo(std::ostream& out) const { } -MetaException::~MetaException() throw() { +WMCreateOrUpdatePoolRequest::~WMCreateOrUpdatePoolRequest() throw() { } -void MetaException::__set_message(const std::string& val) { - this->message = val; +void WMCreateOrUpdatePoolRequest::__set_pool(const WMPool& val) { + this->pool = val; +__isset.pool = true; } -uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) { +void WMCreateOrUpdatePoolRequest::__set_newPoolPath(const std::string& val) { + this->newPoolPath = val; +__isset.newPoolPath = true; +} + +void WMCreateOrUpdatePoolRequest::__set_update(const bool val) { + this->update = val; +__isset.update = true; +} + +uint32_t WMCreateOrUpdatePoolRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -23701,9 +23712,25 @@ uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) { switch (fid) { case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->pool.read(iprot); + this->__isset.pool = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->message); - this->__isset.message = true; + xfer += iprot->readString(this->newPoolPath); + this->__isset.newPoolPath = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->update); + this->__isset.update = true; } else { xfer += iprot->skip(ftype); } @@ -23720,63 +23747,67 @@ uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) { return xfer; } -uint32_t MetaException::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t WMCreateOrUpdatePoolRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("MetaException"); - - xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->message); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("WMCreateOrUpdatePoolRequest"); + if (this->__isset.pool) { + xfer += oprot->writeFieldBegin("pool", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->pool.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.newPoolPath) { + xfer += oprot->writeFieldBegin("newPoolPath", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->newPoolPath); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.update) { + xfer += oprot->writeFieldBegin("update", ::apache::thrift::protocol::T_BOOL, 3); + xfer += oprot->writeBool(this->update); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -void swap(MetaException &a, MetaException &b) { +void swap(WMCreateOrUpdatePoolRequest &a, WMCreateOrUpdatePoolRequest &b) { using ::std::swap; - swap(a.message, b.message); + swap(a.pool, b.pool); + swap(a.newPoolPath, b.newPoolPath); + swap(a.update, b.update); swap(a.__isset, b.__isset); } -MetaException::MetaException(const MetaException& other956) : TException() { - message = other956.message; +WMCreateOrUpdatePoolRequest::WMCreateOrUpdatePoolRequest(const 
WMCreateOrUpdatePoolRequest& other956) { + pool = other956.pool; + newPoolPath = other956.newPoolPath; + update = other956.update; __isset = other956.__isset; } -MetaException& MetaException::operator=(const MetaException& other957) { - message = other957.message; +WMCreateOrUpdatePoolRequest& WMCreateOrUpdatePoolRequest::operator=(const WMCreateOrUpdatePoolRequest& other957) { + pool = other957.pool; + newPoolPath = other957.newPoolPath; + update = other957.update; __isset = other957.__isset; return *this; } -void MetaException::printTo(std::ostream& out) const { +void WMCreateOrUpdatePoolRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; - out << "MetaException("; - out << "message=" << to_string(message); + out << "WMCreateOrUpdatePoolRequest("; + out << "pool="; (__isset.pool ? (out << to_string(pool)) : (out << "")); + out << ", " << "newPoolPath="; (__isset.newPoolPath ? (out << to_string(newPoolPath)) : (out << "")); + out << ", " << "update="; (__isset.update ? (out << to_string(update)) : (out << "")); out << ")"; } -const char* MetaException::what() const throw() { - try { - std::stringstream ss; - ss << "TException - service has thrown: " << *this; - this->thriftTExceptionMessageHolder_ = ss.str(); - return this->thriftTExceptionMessageHolder_.c_str(); - } catch (const std::exception&) { - return "TException - service has thrown: MetaException"; - } -} - -UnknownTableException::~UnknownTableException() throw() { +WMCreateOrUpdatePoolResponse::~WMCreateOrUpdatePoolResponse() throw() { } -void UnknownTableException::__set_message(const std::string& val) { - this->message = val; -} - -uint32_t UnknownTableException::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t WMCreateOrUpdatePoolResponse::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -23795,20 +23826,7 @@ uint32_t UnknownTableException::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->message); - this->__isset.message = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } + xfer += iprot->skip(ftype); xfer += iprot->readFieldEnd(); } @@ -23817,63 +23835,51 @@ uint32_t UnknownTableException::read(::apache::thrift::protocol::TProtocol* ipro return xfer; } -uint32_t UnknownTableException::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t WMCreateOrUpdatePoolResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("UnknownTableException"); - - xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->message); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("WMCreateOrUpdatePoolResponse"); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -void swap(UnknownTableException &a, UnknownTableException &b) { +void swap(WMCreateOrUpdatePoolResponse &a, WMCreateOrUpdatePoolResponse &b) { using ::std::swap; - swap(a.message, b.message); - swap(a.__isset, b.__isset); + (void) a; + (void) b; } -UnknownTableException::UnknownTableException(const UnknownTableException& other958) : TException() { - message = 
other958.message; - __isset = other958.__isset; +WMCreateOrUpdatePoolResponse::WMCreateOrUpdatePoolResponse(const WMCreateOrUpdatePoolResponse& other958) { + (void) other958; } -UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other959) { - message = other959.message; - __isset = other959.__isset; +WMCreateOrUpdatePoolResponse& WMCreateOrUpdatePoolResponse::operator=(const WMCreateOrUpdatePoolResponse& other959) { + (void) other959; return *this; } -void UnknownTableException::printTo(std::ostream& out) const { +void WMCreateOrUpdatePoolResponse::printTo(std::ostream& out) const { using ::apache::thrift::to_string; - out << "UnknownTableException("; - out << "message=" << to_string(message); + out << "WMCreateOrUpdatePoolResponse("; out << ")"; } -const char* UnknownTableException::what() const throw() { - try { - std::stringstream ss; - ss << "TException - service has thrown: " << *this; - this->thriftTExceptionMessageHolder_ = ss.str(); - return this->thriftTExceptionMessageHolder_.c_str(); - } catch (const std::exception&) { - return "TException - service has thrown: UnknownTableException"; - } + +WMDropPoolRequest::~WMDropPoolRequest() throw() { } -UnknownDBException::~UnknownDBException() throw() { +void WMDropPoolRequest::__set_resourcePlanName(const std::string& val) { + this->resourcePlanName = val; +__isset.resourcePlanName = true; } - -void UnknownDBException::__set_message(const std::string& val) { - this->message = val; +void WMDropPoolRequest::__set_poolPath(const std::string& val) { + this->poolPath = val; +__isset.poolPath = true; } -uint32_t UnknownDBException::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t WMDropPoolRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -23896,8 +23902,16 @@ uint32_t UnknownDBException::read(::apache::thrift::protocol::TProtocol* iprot) { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->message); - this->__isset.message = true; + xfer += iprot->readString(this->resourcePlanName); + this->__isset.resourcePlanName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->poolPath); + this->__isset.poolPath = true; } else { xfer += iprot->skip(ftype); } @@ -23914,63 +23928,58 @@ uint32_t UnknownDBException::read(::apache::thrift::protocol::TProtocol* iprot) return xfer; } -uint32_t UnknownDBException::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t WMDropPoolRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("UnknownDBException"); - - xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->message); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("WMDropPoolRequest"); + if (this->__isset.resourcePlanName) { + xfer += oprot->writeFieldBegin("resourcePlanName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->resourcePlanName); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.poolPath) { + xfer += oprot->writeFieldBegin("poolPath", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->poolPath); + xfer += oprot->writeFieldEnd(); + } xfer += 
oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -void swap(UnknownDBException &a, UnknownDBException &b) { +void swap(WMDropPoolRequest &a, WMDropPoolRequest &b) { using ::std::swap; - swap(a.message, b.message); + swap(a.resourcePlanName, b.resourcePlanName); + swap(a.poolPath, b.poolPath); swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other960) : TException() { - message = other960.message; +WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other960) { + resourcePlanName = other960.resourcePlanName; + poolPath = other960.poolPath; __isset = other960.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other961) { - message = other961.message; +WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other961) { + resourcePlanName = other961.resourcePlanName; + poolPath = other961.poolPath; __isset = other961.__isset; return *this; } -void UnknownDBException::printTo(std::ostream& out) const { +void WMDropPoolRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; - out << "UnknownDBException("; - out << "message=" << to_string(message); + out << "WMDropPoolRequest("; + out << "resourcePlanName="; (__isset.resourcePlanName ? (out << to_string(resourcePlanName)) : (out << "")); + out << ", " << "poolPath="; (__isset.poolPath ? (out << to_string(poolPath)) : (out << "")); out << ")"; } -const char* UnknownDBException::what() const throw() { - try { - std::stringstream ss; - ss << "TException - service has thrown: " << *this; - this->thriftTExceptionMessageHolder_ = ss.str(); - return this->thriftTExceptionMessageHolder_.c_str(); - } catch (const std::exception&) { - return "TException - service has thrown: UnknownDBException"; - } -} - -AlreadyExistsException::~AlreadyExistsException() throw() { +WMDropPoolResponse::~WMDropPoolResponse() throw() { } -void AlreadyExistsException::__set_message(const std::string& val) { - this->message = val; -} - -uint32_t AlreadyExistsException::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t WMDropPoolResponse::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -23989,20 +23998,7 @@ uint32_t AlreadyExistsException::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->message); - this->__isset.message = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } + xfer += iprot->skip(ftype); xfer += iprot->readFieldEnd(); } @@ -24011,63 +24007,51 @@ uint32_t AlreadyExistsException::read(::apache::thrift::protocol::TProtocol* ipr return xfer; } -uint32_t AlreadyExistsException::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t WMDropPoolResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("AlreadyExistsException"); - - xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->message); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("WMDropPoolResponse"); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -void 
swap(AlreadyExistsException &a, AlreadyExistsException &b) { +void swap(WMDropPoolResponse &a, WMDropPoolResponse &b) { using ::std::swap; - swap(a.message, b.message); - swap(a.__isset, b.__isset); + (void) a; + (void) b; } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other962) : TException() { - message = other962.message; - __isset = other962.__isset; +WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other962) { + (void) other962; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other963) { - message = other963.message; - __isset = other963.__isset; +WMDropPoolResponse& WMDropPoolResponse::operator=(const WMDropPoolResponse& other963) { + (void) other963; return *this; } -void AlreadyExistsException::printTo(std::ostream& out) const { +void WMDropPoolResponse::printTo(std::ostream& out) const { using ::apache::thrift::to_string; - out << "AlreadyExistsException("; - out << "message=" << to_string(message); + out << "WMDropPoolResponse("; out << ")"; } -const char* AlreadyExistsException::what() const throw() { - try { - std::stringstream ss; - ss << "TException - service has thrown: " << *this; - this->thriftTExceptionMessageHolder_ = ss.str(); - return this->thriftTExceptionMessageHolder_.c_str(); - } catch (const std::exception&) { - return "TException - service has thrown: AlreadyExistsException"; - } + +WMCreateOrUpdateMappingRequest::~WMCreateOrUpdateMappingRequest() throw() { } -InvalidPartitionException::~InvalidPartitionException() throw() { +void WMCreateOrUpdateMappingRequest::__set_mapping(const WMMapping& val) { + this->mapping = val; +__isset.mapping = true; } - -void InvalidPartitionException::__set_message(const std::string& val) { - this->message = val; +void WMCreateOrUpdateMappingRequest::__set_update(const bool val) { + this->update = val; +__isset.update = true; } -uint32_t InvalidPartitionException::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t WMCreateOrUpdateMappingRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -24089,9 +24073,17 @@ uint32_t InvalidPartitionException::read(::apache::thrift::protocol::TProtocol* switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->message); - this->__isset.message = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->mapping.read(iprot); + this->__isset.mapping = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->update); + this->__isset.update = true; } else { xfer += iprot->skip(ftype); } @@ -24108,63 +24100,58 @@ uint32_t InvalidPartitionException::read(::apache::thrift::protocol::TProtocol* return xfer; } -uint32_t InvalidPartitionException::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t WMCreateOrUpdateMappingRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("InvalidPartitionException"); - - xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->message); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("WMCreateOrUpdateMappingRequest"); + if (this->__isset.mapping) { + xfer += 
oprot->writeFieldBegin("mapping", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->mapping.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.update) { + xfer += oprot->writeFieldBegin("update", ::apache::thrift::protocol::T_BOOL, 2); + xfer += oprot->writeBool(this->update); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -void swap(InvalidPartitionException &a, InvalidPartitionException &b) { +void swap(WMCreateOrUpdateMappingRequest &a, WMCreateOrUpdateMappingRequest &b) { using ::std::swap; - swap(a.message, b.message); + swap(a.mapping, b.mapping); + swap(a.update, b.update); swap(a.__isset, b.__isset); } -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other964) : TException() { - message = other964.message; +WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest& other964) { + mapping = other964.mapping; + update = other964.update; __isset = other964.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other965) { - message = other965.message; +WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other965) { + mapping = other965.mapping; + update = other965.update; __isset = other965.__isset; return *this; } -void InvalidPartitionException::printTo(std::ostream& out) const { +void WMCreateOrUpdateMappingRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; - out << "InvalidPartitionException("; - out << "message=" << to_string(message); + out << "WMCreateOrUpdateMappingRequest("; + out << "mapping="; (__isset.mapping ? (out << to_string(mapping)) : (out << "")); + out << ", " << "update="; (__isset.update ? 
(out << to_string(update)) : (out << "")); out << ")"; } -const char* InvalidPartitionException::what() const throw() { - try { - std::stringstream ss; - ss << "TException - service has thrown: " << *this; - this->thriftTExceptionMessageHolder_ = ss.str(); - return this->thriftTExceptionMessageHolder_.c_str(); - } catch (const std::exception&) { - return "TException - service has thrown: InvalidPartitionException"; - } -} - -UnknownPartitionException::~UnknownPartitionException() throw() { +WMCreateOrUpdateMappingResponse::~WMCreateOrUpdateMappingResponse() throw() { } -void UnknownPartitionException::__set_message(const std::string& val) { - this->message = val; -} - -uint32_t UnknownPartitionException::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t WMCreateOrUpdateMappingResponse::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -24183,19 +24170,936 @@ uint32_t UnknownPartitionException::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->message); - this->__isset.message = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t WMCreateOrUpdateMappingResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("WMCreateOrUpdateMappingResponse"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(WMCreateOrUpdateMappingResponse &a, WMCreateOrUpdateMappingResponse &b) { + using ::std::swap; + (void) a; + (void) b; +} + +WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse& other966) { + (void) other966; +} +WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other967) { + (void) other967; + return *this; +} +void WMCreateOrUpdateMappingResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "WMCreateOrUpdateMappingResponse("; + out << ")"; +} + + +WMDropMappingRequest::~WMDropMappingRequest() throw() { +} + + +void WMDropMappingRequest::__set_mapping(const WMMapping& val) { + this->mapping = val; +__isset.mapping = true; +} + +uint32_t WMDropMappingRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->mapping.read(iprot); + this->__isset.mapping = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t 
WMDropMappingRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("WMDropMappingRequest"); + + if (this->__isset.mapping) { + xfer += oprot->writeFieldBegin("mapping", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->mapping.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(WMDropMappingRequest &a, WMDropMappingRequest &b) { + using ::std::swap; + swap(a.mapping, b.mapping); + swap(a.__isset, b.__isset); +} + +WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other968) { + mapping = other968.mapping; + __isset = other968.__isset; +} +WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other969) { + mapping = other969.mapping; + __isset = other969.__isset; + return *this; +} +void WMDropMappingRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "WMDropMappingRequest("; + out << "mapping="; (__isset.mapping ? (out << to_string(mapping)) : (out << "")); + out << ")"; +} + + +WMDropMappingResponse::~WMDropMappingResponse() throw() { +} + + +uint32_t WMDropMappingResponse::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t WMDropMappingResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("WMDropMappingResponse"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(WMDropMappingResponse &a, WMDropMappingResponse &b) { + using ::std::swap; + (void) a; + (void) b; +} + +WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other970) { + (void) other970; +} +WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other971) { + (void) other971; + return *this; +} +void WMDropMappingResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "WMDropMappingResponse("; + out << ")"; +} + + +WMCreateOrDropTriggerToPoolMappingRequest::~WMCreateOrDropTriggerToPoolMappingRequest() throw() { +} + + +void WMCreateOrDropTriggerToPoolMappingRequest::__set_resourcePlanName(const std::string& val) { + this->resourcePlanName = val; +__isset.resourcePlanName = true; +} + +void WMCreateOrDropTriggerToPoolMappingRequest::__set_triggerName(const std::string& val) { + this->triggerName = val; +__isset.triggerName = true; +} + +void WMCreateOrDropTriggerToPoolMappingRequest::__set_poolPath(const std::string& val) { + this->poolPath = val; +__isset.poolPath = true; +} + +void WMCreateOrDropTriggerToPoolMappingRequest::__set_drop(const bool val) { + this->drop = val; +__isset.drop = true; +} + +uint32_t 
WMCreateOrDropTriggerToPoolMappingRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->resourcePlanName); + this->__isset.resourcePlanName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->triggerName); + this->__isset.triggerName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->poolPath); + this->__isset.poolPath = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->drop); + this->__isset.drop = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t WMCreateOrDropTriggerToPoolMappingRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("WMCreateOrDropTriggerToPoolMappingRequest"); + + if (this->__isset.resourcePlanName) { + xfer += oprot->writeFieldBegin("resourcePlanName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->resourcePlanName); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.triggerName) { + xfer += oprot->writeFieldBegin("triggerName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->triggerName); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.poolPath) { + xfer += oprot->writeFieldBegin("poolPath", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->poolPath); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.drop) { + xfer += oprot->writeFieldBegin("drop", ::apache::thrift::protocol::T_BOOL, 4); + xfer += oprot->writeBool(this->drop); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(WMCreateOrDropTriggerToPoolMappingRequest &a, WMCreateOrDropTriggerToPoolMappingRequest &b) { + using ::std::swap; + swap(a.resourcePlanName, b.resourcePlanName); + swap(a.triggerName, b.triggerName); + swap(a.poolPath, b.poolPath); + swap(a.drop, b.drop); + swap(a.__isset, b.__isset); +} + +WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other972) { + resourcePlanName = other972.resourcePlanName; + triggerName = other972.triggerName; + poolPath = other972.poolPath; + drop = other972.drop; + __isset = other972.__isset; +} +WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other973) { + resourcePlanName = other973.resourcePlanName; + triggerName = other973.triggerName; + 
poolPath = other973.poolPath; + drop = other973.drop; + __isset = other973.__isset; + return *this; +} +void WMCreateOrDropTriggerToPoolMappingRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "WMCreateOrDropTriggerToPoolMappingRequest("; + out << "resourcePlanName="; (__isset.resourcePlanName ? (out << to_string(resourcePlanName)) : (out << "")); + out << ", " << "triggerName="; (__isset.triggerName ? (out << to_string(triggerName)) : (out << "")); + out << ", " << "poolPath="; (__isset.poolPath ? (out << to_string(poolPath)) : (out << "")); + out << ", " << "drop="; (__isset.drop ? (out << to_string(drop)) : (out << "")); + out << ")"; +} + + +WMCreateOrDropTriggerToPoolMappingResponse::~WMCreateOrDropTriggerToPoolMappingResponse() throw() { +} + + +uint32_t WMCreateOrDropTriggerToPoolMappingResponse::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t WMCreateOrDropTriggerToPoolMappingResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("WMCreateOrDropTriggerToPoolMappingResponse"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(WMCreateOrDropTriggerToPoolMappingResponse &a, WMCreateOrDropTriggerToPoolMappingResponse &b) { + using ::std::swap; + (void) a; + (void) b; +} + +WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other974) { + (void) other974; +} +WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other975) { + (void) other975; + return *this; +} +void WMCreateOrDropTriggerToPoolMappingResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "WMCreateOrDropTriggerToPoolMappingResponse("; + out << ")"; +} + + +MetaException::~MetaException() throw() { +} + + +void MetaException::__set_message(const std::string& val) { + this->message = val; +} + +uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->message); + this->__isset.message = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t 
MetaException::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("MetaException"); + + xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->message); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(MetaException &a, MetaException &b) { + using ::std::swap; + swap(a.message, b.message); + swap(a.__isset, b.__isset); +} + +MetaException::MetaException(const MetaException& other976) : TException() { + message = other976.message; + __isset = other976.__isset; +} +MetaException& MetaException::operator=(const MetaException& other977) { + message = other977.message; + __isset = other977.__isset; + return *this; +} +void MetaException::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "MetaException("; + out << "message=" << to_string(message); + out << ")"; +} + +const char* MetaException::what() const throw() { + try { + std::stringstream ss; + ss << "TException - service has thrown: " << *this; + this->thriftTExceptionMessageHolder_ = ss.str(); + return this->thriftTExceptionMessageHolder_.c_str(); + } catch (const std::exception&) { + return "TException - service has thrown: MetaException"; + } +} + + +UnknownTableException::~UnknownTableException() throw() { +} + + +void UnknownTableException::__set_message(const std::string& val) { + this->message = val; +} + +uint32_t UnknownTableException::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->message); + this->__isset.message = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t UnknownTableException::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("UnknownTableException"); + + xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->message); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(UnknownTableException &a, UnknownTableException &b) { + using ::std::swap; + swap(a.message, b.message); + swap(a.__isset, b.__isset); +} + +UnknownTableException::UnknownTableException(const UnknownTableException& other978) : TException() { + message = other978.message; + __isset = other978.__isset; +} +UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other979) { + message = other979.message; + __isset = other979.__isset; + return *this; +} +void UnknownTableException::printTo(std::ostream& out) const { + using 
::apache::thrift::to_string; + out << "UnknownTableException("; + out << "message=" << to_string(message); + out << ")"; +} + +const char* UnknownTableException::what() const throw() { + try { + std::stringstream ss; + ss << "TException - service has thrown: " << *this; + this->thriftTExceptionMessageHolder_ = ss.str(); + return this->thriftTExceptionMessageHolder_.c_str(); + } catch (const std::exception&) { + return "TException - service has thrown: UnknownTableException"; + } +} + + +UnknownDBException::~UnknownDBException() throw() { +} + + +void UnknownDBException::__set_message(const std::string& val) { + this->message = val; +} + +uint32_t UnknownDBException::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->message); + this->__isset.message = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t UnknownDBException::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("UnknownDBException"); + + xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->message); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(UnknownDBException &a, UnknownDBException &b) { + using ::std::swap; + swap(a.message, b.message); + swap(a.__isset, b.__isset); +} + +UnknownDBException::UnknownDBException(const UnknownDBException& other980) : TException() { + message = other980.message; + __isset = other980.__isset; +} +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other981) { + message = other981.message; + __isset = other981.__isset; + return *this; +} +void UnknownDBException::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "UnknownDBException("; + out << "message=" << to_string(message); + out << ")"; +} + +const char* UnknownDBException::what() const throw() { + try { + std::stringstream ss; + ss << "TException - service has thrown: " << *this; + this->thriftTExceptionMessageHolder_ = ss.str(); + return this->thriftTExceptionMessageHolder_.c_str(); + } catch (const std::exception&) { + return "TException - service has thrown: UnknownDBException"; + } +} + + +AlreadyExistsException::~AlreadyExistsException() throw() { +} + + +void AlreadyExistsException::__set_message(const std::string& val) { + this->message = val; +} + +uint32_t AlreadyExistsException::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while 
(true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->message); + this->__isset.message = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t AlreadyExistsException::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("AlreadyExistsException"); + + xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->message); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(AlreadyExistsException &a, AlreadyExistsException &b) { + using ::std::swap; + swap(a.message, b.message); + swap(a.__isset, b.__isset); +} + +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other982) : TException() { + message = other982.message; + __isset = other982.__isset; +} +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other983) { + message = other983.message; + __isset = other983.__isset; + return *this; +} +void AlreadyExistsException::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "AlreadyExistsException("; + out << "message=" << to_string(message); + out << ")"; +} + +const char* AlreadyExistsException::what() const throw() { + try { + std::stringstream ss; + ss << "TException - service has thrown: " << *this; + this->thriftTExceptionMessageHolder_ = ss.str(); + return this->thriftTExceptionMessageHolder_.c_str(); + } catch (const std::exception&) { + return "TException - service has thrown: AlreadyExistsException"; + } +} + + +InvalidPartitionException::~InvalidPartitionException() throw() { +} + + +void InvalidPartitionException::__set_message(const std::string& val) { + this->message = val; +} + +uint32_t InvalidPartitionException::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->message); + this->__isset.message = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t InvalidPartitionException::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("InvalidPartitionException"); + + xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->message); + xfer += oprot->writeFieldEnd(); + + xfer += 
oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(InvalidPartitionException &a, InvalidPartitionException &b) { + using ::std::swap; + swap(a.message, b.message); + swap(a.__isset, b.__isset); +} + +InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other984) : TException() { + message = other984.message; + __isset = other984.__isset; +} +InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other985) { + message = other985.message; + __isset = other985.__isset; + return *this; +} +void InvalidPartitionException::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "InvalidPartitionException("; + out << "message=" << to_string(message); + out << ")"; +} + +const char* InvalidPartitionException::what() const throw() { + try { + std::stringstream ss; + ss << "TException - service has thrown: " << *this; + this->thriftTExceptionMessageHolder_ = ss.str(); + return this->thriftTExceptionMessageHolder_.c_str(); + } catch (const std::exception&) { + return "TException - service has thrown: InvalidPartitionException"; + } +} + + +UnknownPartitionException::~UnknownPartitionException() throw() { +} + + +void UnknownPartitionException::__set_message(const std::string& val) { + this->message = val; +} + +uint32_t UnknownPartitionException::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->message); + this->__isset.message = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; } xfer += iprot->readFieldEnd(); } @@ -24225,13 +25129,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other966) : TException() { - message = other966.message; - __isset = other966.__isset; +UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other986) : TException() { + message = other986.message; + __isset = other986.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other967) { - message = other967.message; - __isset = other967.__isset; +UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other987) { + message = other987.message; + __isset = other987.__isset; return *this; } void UnknownPartitionException::printTo(std::ostream& out) const { @@ -24322,13 +25226,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other968) : TException() { - message = other968.message; - __isset = other968.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other988) : TException() { + message = other988.message; + __isset = other988.__isset; } -InvalidObjectException& 
InvalidObjectException::operator=(const InvalidObjectException& other969) { - message = other969.message; - __isset = other969.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other989) { + message = other989.message; + __isset = other989.__isset; return *this; } void InvalidObjectException::printTo(std::ostream& out) const { @@ -24419,13 +25323,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other970) : TException() { - message = other970.message; - __isset = other970.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other990) : TException() { + message = other990.message; + __isset = other990.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other971) { - message = other971.message; - __isset = other971.__isset; +NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other991) { + message = other991.message; + __isset = other991.__isset; return *this; } void NoSuchObjectException::printTo(std::ostream& out) const { @@ -24516,13 +25420,13 @@ void swap(IndexAlreadyExistsException &a, IndexAlreadyExistsException &b) { swap(a.__isset, b.__isset); } -IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other972) : TException() { - message = other972.message; - __isset = other972.__isset; +IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other992) : TException() { + message = other992.message; + __isset = other992.__isset; } -IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other973) { - message = other973.message; - __isset = other973.__isset; +IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other993) { + message = other993.message; + __isset = other993.__isset; return *this; } void IndexAlreadyExistsException::printTo(std::ostream& out) const { @@ -24613,13 +25517,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other974) : TException() { - message = other974.message; - __isset = other974.__isset; +InvalidOperationException::InvalidOperationException(const InvalidOperationException& other994) : TException() { + message = other994.message; + __isset = other994.__isset; } -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other975) { - message = other975.message; - __isset = other975.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other995) { + message = other995.message; + __isset = other995.__isset; return *this; } void InvalidOperationException::printTo(std::ostream& out) const { @@ -24710,13 +25614,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other976) : TException() { - message = other976.message; - __isset = other976.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other996) : TException() { + message = other996.message; + __isset = other996.__isset; } 
-ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other977) { - message = other977.message; - __isset = other977.__isset; +ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other997) { + message = other997.message; + __isset = other997.__isset; return *this; } void ConfigValSecurityException::printTo(std::ostream& out) const { @@ -24807,13 +25711,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other978) : TException() { - message = other978.message; - __isset = other978.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other998) : TException() { + message = other998.message; + __isset = other998.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other979) { - message = other979.message; - __isset = other979.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other999) { + message = other999.message; + __isset = other999.__isset; return *this; } void InvalidInputException::printTo(std::ostream& out) const { @@ -24904,13 +25808,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { swap(a.__isset, b.__isset); } -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other980) : TException() { - message = other980.message; - __isset = other980.__isset; +NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1000) : TException() { + message = other1000.message; + __isset = other1000.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other981) { - message = other981.message; - __isset = other981.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1001) { + message = other1001.message; + __isset = other1001.__isset; return *this; } void NoSuchTxnException::printTo(std::ostream& out) const { @@ -25001,13 +25905,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other982) : TException() { - message = other982.message; - __isset = other982.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1002) : TException() { + message = other1002.message; + __isset = other1002.__isset; } -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other983) { - message = other983.message; - __isset = other983.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1003) { + message = other1003.message; + __isset = other1003.__isset; return *this; } void TxnAbortedException::printTo(std::ostream& out) const { @@ -25098,13 +26002,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other984) : TException() { - message = other984.message; - __isset = other984.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other1004) : TException() { + message = other1004.message; + __isset = other1004.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other985) { - message = other985.message; - __isset = other985.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1005) { + message = 
other1005.message; + __isset = other1005.__isset; return *this; } void TxnOpenException::printTo(std::ostream& out) const { @@ -25195,13 +26099,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other986) : TException() { - message = other986.message; - __isset = other986.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1006) : TException() { + message = other1006.message; + __isset = other1006.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other987) { - message = other987.message; - __isset = other987.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1007) { + message = other1007.message; + __isset = other1007.__isset; return *this; } void NoSuchLockException::printTo(std::ostream& out) const { diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index cdf0570079..1d43e3912a 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -505,6 +505,26 @@ class WMGetTriggersForResourePlanRequest; class WMGetTriggersForResourePlanResponse; +class WMCreateOrUpdatePoolRequest; + +class WMCreateOrUpdatePoolResponse; + +class WMDropPoolRequest; + +class WMDropPoolResponse; + +class WMCreateOrUpdateMappingRequest; + +class WMCreateOrUpdateMappingResponse; + +class WMDropMappingRequest; + +class WMDropMappingResponse; + +class WMCreateOrDropTriggerToPoolMappingRequest; + +class WMCreateOrDropTriggerToPoolMappingResponse; + class MetaException; class UnknownTableException; @@ -8718,8 +8738,8 @@ inline std::ostream& operator<<(std::ostream& out, const WMTrigger& obj) } typedef struct _WMMapping__isset { - _WMMapping__isset() : poolName(false), ordering(false) {} - bool poolName :1; + _WMMapping__isset() : poolPath(false), ordering(false) {} + bool poolPath :1; bool ordering :1; } _WMMapping__isset; @@ -8728,14 +8748,14 @@ class WMMapping { WMMapping(const WMMapping&); WMMapping& operator=(const WMMapping&); - WMMapping() : resourcePlanName(), entityType(), entityName(), poolName(), ordering(0) { + WMMapping() : resourcePlanName(), entityType(), entityName(), poolPath(), ordering(0) { } virtual ~WMMapping() throw(); std::string resourcePlanName; std::string entityType; std::string entityName; - std::string poolName; + std::string poolPath; int32_t ordering; _WMMapping__isset __isset; @@ -8746,7 +8766,7 @@ class WMMapping { void __set_entityName(const std::string& val); - void __set_poolName(const std::string& val); + void __set_poolPath(const std::string& val); void __set_ordering(const int32_t val); @@ -8758,9 +8778,9 @@ class WMMapping { return false; if (!(entityName == rhs.entityName)) return false; - if (__isset.poolName != rhs.__isset.poolName) + if (__isset.poolPath != rhs.__isset.poolPath) return false; - else if (__isset.poolName && !(poolName == rhs.poolName)) + else if (__isset.poolPath && !(poolPath == rhs.poolPath)) return false; if (__isset.ordering != rhs.__isset.ordering) return false; @@ -9896,6 +9916,477 @@ inline std::ostream& operator<<(std::ostream& out, const WMGetTriggersForResoure return out; } +typedef struct _WMCreateOrUpdatePoolRequest__isset { + _WMCreateOrUpdatePoolRequest__isset() : pool(false), newPoolPath(false), update(false) {} + bool pool :1; + bool 
newPoolPath :1; + bool update :1; +} _WMCreateOrUpdatePoolRequest__isset; + +class WMCreateOrUpdatePoolRequest { + public: + + WMCreateOrUpdatePoolRequest(const WMCreateOrUpdatePoolRequest&); + WMCreateOrUpdatePoolRequest& operator=(const WMCreateOrUpdatePoolRequest&); + WMCreateOrUpdatePoolRequest() : newPoolPath(), update(0) { + } + + virtual ~WMCreateOrUpdatePoolRequest() throw(); + WMPool pool; + std::string newPoolPath; + bool update; + + _WMCreateOrUpdatePoolRequest__isset __isset; + + void __set_pool(const WMPool& val); + + void __set_newPoolPath(const std::string& val); + + void __set_update(const bool val); + + bool operator == (const WMCreateOrUpdatePoolRequest & rhs) const + { + if (__isset.pool != rhs.__isset.pool) + return false; + else if (__isset.pool && !(pool == rhs.pool)) + return false; + if (__isset.newPoolPath != rhs.__isset.newPoolPath) + return false; + else if (__isset.newPoolPath && !(newPoolPath == rhs.newPoolPath)) + return false; + if (__isset.update != rhs.__isset.update) + return false; + else if (__isset.update && !(update == rhs.update)) + return false; + return true; + } + bool operator != (const WMCreateOrUpdatePoolRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const WMCreateOrUpdatePoolRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(WMCreateOrUpdatePoolRequest &a, WMCreateOrUpdatePoolRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const WMCreateOrUpdatePoolRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class WMCreateOrUpdatePoolResponse { + public: + + WMCreateOrUpdatePoolResponse(const WMCreateOrUpdatePoolResponse&); + WMCreateOrUpdatePoolResponse& operator=(const WMCreateOrUpdatePoolResponse&); + WMCreateOrUpdatePoolResponse() { + } + + virtual ~WMCreateOrUpdatePoolResponse() throw(); + + bool operator == (const WMCreateOrUpdatePoolResponse & /* rhs */) const + { + return true; + } + bool operator != (const WMCreateOrUpdatePoolResponse &rhs) const { + return !(*this == rhs); + } + + bool operator < (const WMCreateOrUpdatePoolResponse & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(WMCreateOrUpdatePoolResponse &a, WMCreateOrUpdatePoolResponse &b); + +inline std::ostream& operator<<(std::ostream& out, const WMCreateOrUpdatePoolResponse& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _WMDropPoolRequest__isset { + _WMDropPoolRequest__isset() : resourcePlanName(false), poolPath(false) {} + bool resourcePlanName :1; + bool poolPath :1; +} _WMDropPoolRequest__isset; + +class WMDropPoolRequest { + public: + + WMDropPoolRequest(const WMDropPoolRequest&); + WMDropPoolRequest& operator=(const WMDropPoolRequest&); + WMDropPoolRequest() : resourcePlanName(), poolPath() { + } + + virtual ~WMDropPoolRequest() throw(); + std::string resourcePlanName; + std::string poolPath; + + _WMDropPoolRequest__isset __isset; + + void __set_resourcePlanName(const std::string& val); + + void __set_poolPath(const std::string& val); + + bool operator == (const WMDropPoolRequest & rhs) const + { + if (__isset.resourcePlanName != rhs.__isset.resourcePlanName) + return false; + else if (__isset.resourcePlanName && !(resourcePlanName == rhs.resourcePlanName)) + return false; + if 
(__isset.poolPath != rhs.__isset.poolPath) + return false; + else if (__isset.poolPath && !(poolPath == rhs.poolPath)) + return false; + return true; + } + bool operator != (const WMDropPoolRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const WMDropPoolRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(WMDropPoolRequest &a, WMDropPoolRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const WMDropPoolRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class WMDropPoolResponse { + public: + + WMDropPoolResponse(const WMDropPoolResponse&); + WMDropPoolResponse& operator=(const WMDropPoolResponse&); + WMDropPoolResponse() { + } + + virtual ~WMDropPoolResponse() throw(); + + bool operator == (const WMDropPoolResponse & /* rhs */) const + { + return true; + } + bool operator != (const WMDropPoolResponse &rhs) const { + return !(*this == rhs); + } + + bool operator < (const WMDropPoolResponse & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(WMDropPoolResponse &a, WMDropPoolResponse &b); + +inline std::ostream& operator<<(std::ostream& out, const WMDropPoolResponse& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _WMCreateOrUpdateMappingRequest__isset { + _WMCreateOrUpdateMappingRequest__isset() : mapping(false), update(false) {} + bool mapping :1; + bool update :1; +} _WMCreateOrUpdateMappingRequest__isset; + +class WMCreateOrUpdateMappingRequest { + public: + + WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest&); + WMCreateOrUpdateMappingRequest& operator=(const WMCreateOrUpdateMappingRequest&); + WMCreateOrUpdateMappingRequest() : update(0) { + } + + virtual ~WMCreateOrUpdateMappingRequest() throw(); + WMMapping mapping; + bool update; + + _WMCreateOrUpdateMappingRequest__isset __isset; + + void __set_mapping(const WMMapping& val); + + void __set_update(const bool val); + + bool operator == (const WMCreateOrUpdateMappingRequest & rhs) const + { + if (__isset.mapping != rhs.__isset.mapping) + return false; + else if (__isset.mapping && !(mapping == rhs.mapping)) + return false; + if (__isset.update != rhs.__isset.update) + return false; + else if (__isset.update && !(update == rhs.update)) + return false; + return true; + } + bool operator != (const WMCreateOrUpdateMappingRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const WMCreateOrUpdateMappingRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(WMCreateOrUpdateMappingRequest &a, WMCreateOrUpdateMappingRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const WMCreateOrUpdateMappingRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class WMCreateOrUpdateMappingResponse { + public: + + WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse&); + WMCreateOrUpdateMappingResponse& operator=(const WMCreateOrUpdateMappingResponse&); + WMCreateOrUpdateMappingResponse() { + } + + virtual ~WMCreateOrUpdateMappingResponse() throw(); + + bool operator == (const WMCreateOrUpdateMappingResponse & /* rhs */) const + { 
+ return true; + } + bool operator != (const WMCreateOrUpdateMappingResponse &rhs) const { + return !(*this == rhs); + } + + bool operator < (const WMCreateOrUpdateMappingResponse & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(WMCreateOrUpdateMappingResponse &a, WMCreateOrUpdateMappingResponse &b); + +inline std::ostream& operator<<(std::ostream& out, const WMCreateOrUpdateMappingResponse& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _WMDropMappingRequest__isset { + _WMDropMappingRequest__isset() : mapping(false) {} + bool mapping :1; +} _WMDropMappingRequest__isset; + +class WMDropMappingRequest { + public: + + WMDropMappingRequest(const WMDropMappingRequest&); + WMDropMappingRequest& operator=(const WMDropMappingRequest&); + WMDropMappingRequest() { + } + + virtual ~WMDropMappingRequest() throw(); + WMMapping mapping; + + _WMDropMappingRequest__isset __isset; + + void __set_mapping(const WMMapping& val); + + bool operator == (const WMDropMappingRequest & rhs) const + { + if (__isset.mapping != rhs.__isset.mapping) + return false; + else if (__isset.mapping && !(mapping == rhs.mapping)) + return false; + return true; + } + bool operator != (const WMDropMappingRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const WMDropMappingRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(WMDropMappingRequest &a, WMDropMappingRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const WMDropMappingRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class WMDropMappingResponse { + public: + + WMDropMappingResponse(const WMDropMappingResponse&); + WMDropMappingResponse& operator=(const WMDropMappingResponse&); + WMDropMappingResponse() { + } + + virtual ~WMDropMappingResponse() throw(); + + bool operator == (const WMDropMappingResponse & /* rhs */) const + { + return true; + } + bool operator != (const WMDropMappingResponse &rhs) const { + return !(*this == rhs); + } + + bool operator < (const WMDropMappingResponse & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(WMDropMappingResponse &a, WMDropMappingResponse &b); + +inline std::ostream& operator<<(std::ostream& out, const WMDropMappingResponse& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _WMCreateOrDropTriggerToPoolMappingRequest__isset { + _WMCreateOrDropTriggerToPoolMappingRequest__isset() : resourcePlanName(false), triggerName(false), poolPath(false), drop(false) {} + bool resourcePlanName :1; + bool triggerName :1; + bool poolPath :1; + bool drop :1; +} _WMCreateOrDropTriggerToPoolMappingRequest__isset; + +class WMCreateOrDropTriggerToPoolMappingRequest { + public: + + WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest&); + WMCreateOrDropTriggerToPoolMappingRequest& operator=(const WMCreateOrDropTriggerToPoolMappingRequest&); + WMCreateOrDropTriggerToPoolMappingRequest() : resourcePlanName(), triggerName(), poolPath(), drop(0) { + } + + virtual ~WMCreateOrDropTriggerToPoolMappingRequest() throw(); + std::string resourcePlanName; + 
std::string triggerName; + std::string poolPath; + bool drop; + + _WMCreateOrDropTriggerToPoolMappingRequest__isset __isset; + + void __set_resourcePlanName(const std::string& val); + + void __set_triggerName(const std::string& val); + + void __set_poolPath(const std::string& val); + + void __set_drop(const bool val); + + bool operator == (const WMCreateOrDropTriggerToPoolMappingRequest & rhs) const + { + if (__isset.resourcePlanName != rhs.__isset.resourcePlanName) + return false; + else if (__isset.resourcePlanName && !(resourcePlanName == rhs.resourcePlanName)) + return false; + if (__isset.triggerName != rhs.__isset.triggerName) + return false; + else if (__isset.triggerName && !(triggerName == rhs.triggerName)) + return false; + if (__isset.poolPath != rhs.__isset.poolPath) + return false; + else if (__isset.poolPath && !(poolPath == rhs.poolPath)) + return false; + if (__isset.drop != rhs.__isset.drop) + return false; + else if (__isset.drop && !(drop == rhs.drop)) + return false; + return true; + } + bool operator != (const WMCreateOrDropTriggerToPoolMappingRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const WMCreateOrDropTriggerToPoolMappingRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(WMCreateOrDropTriggerToPoolMappingRequest &a, WMCreateOrDropTriggerToPoolMappingRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const WMCreateOrDropTriggerToPoolMappingRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class WMCreateOrDropTriggerToPoolMappingResponse { + public: + + WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse&); + WMCreateOrDropTriggerToPoolMappingResponse& operator=(const WMCreateOrDropTriggerToPoolMappingResponse&); + WMCreateOrDropTriggerToPoolMappingResponse() { + } + + virtual ~WMCreateOrDropTriggerToPoolMappingResponse() throw(); + + bool operator == (const WMCreateOrDropTriggerToPoolMappingResponse & /* rhs */) const + { + return true; + } + bool operator != (const WMCreateOrDropTriggerToPoolMappingResponse &rhs) const { + return !(*this == rhs); + } + + bool operator < (const WMCreateOrDropTriggerToPoolMappingResponse & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(WMCreateOrDropTriggerToPoolMappingResponse &a, WMCreateOrDropTriggerToPoolMappingResponse &b); + +inline std::ostream& operator<<(std::ostream& out, const WMCreateOrDropTriggerToPoolMappingResponse& obj) +{ + obj.printTo(out); + return out; +} + typedef struct _MetaException__isset { _MetaException__isset() : message(false) {} bool message :1; diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index febf3047ba..64e24c56ab 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -388,6 +388,16 @@ public WMGetTriggersForResourePlanResponse get_triggers_for_resourceplan(WMGetTriggersForResourePlanRequest request) throws 
NoSuchObjectException, MetaException, org.apache.thrift.TException; + public WMCreateOrUpdatePoolResponse create_or_update_wm_pool(WMCreateOrUpdatePoolRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException; + + public WMDropPoolResponse drop_wm_pool(WMDropPoolRequest request) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException; + + public WMCreateOrUpdateMappingResponse create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException; + + public WMDropMappingResponse drop_wm_mapping(WMDropMappingRequest request) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException; + + public WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException; + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIface { @@ -738,6 +748,16 @@ public void get_triggers_for_resourceplan(WMGetTriggersForResourePlanRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void create_or_update_wm_pool(WMCreateOrUpdatePoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void drop_wm_pool(WMDropPoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void drop_wm_mapping(WMDropMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Client extends com.facebook.fb303.FacebookService.Client implements Iface { @@ -5739,6 +5759,175 @@ public WMGetTriggersForResourePlanResponse recv_get_triggers_for_resourceplan() throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_triggers_for_resourceplan failed: unknown result"); } + public WMCreateOrUpdatePoolResponse create_or_update_wm_pool(WMCreateOrUpdatePoolRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException + { + send_create_or_update_wm_pool(request); + return recv_create_or_update_wm_pool(); + } + + public void send_create_or_update_wm_pool(WMCreateOrUpdatePoolRequest request) throws org.apache.thrift.TException + { + create_or_update_wm_pool_args args = new create_or_update_wm_pool_args(); + args.setRequest(request); + sendBase("create_or_update_wm_pool", args); + } + + public WMCreateOrUpdatePoolResponse 
recv_create_or_update_wm_pool() throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException + { + create_or_update_wm_pool_result result = new create_or_update_wm_pool_result(); + receiveBase(result, "create_or_update_wm_pool"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + if (result.o4 != null) { + throw result.o4; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "create_or_update_wm_pool failed: unknown result"); + } + + public WMDropPoolResponse drop_wm_pool(WMDropPoolRequest request) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException + { + send_drop_wm_pool(request); + return recv_drop_wm_pool(); + } + + public void send_drop_wm_pool(WMDropPoolRequest request) throws org.apache.thrift.TException + { + drop_wm_pool_args args = new drop_wm_pool_args(); + args.setRequest(request); + sendBase("drop_wm_pool", args); + } + + public WMDropPoolResponse recv_drop_wm_pool() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException + { + drop_wm_pool_result result = new drop_wm_pool_result(); + receiveBase(result, "drop_wm_pool"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "drop_wm_pool failed: unknown result"); + } + + public WMCreateOrUpdateMappingResponse create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException + { + send_create_or_update_wm_mapping(request); + return recv_create_or_update_wm_mapping(); + } + + public void send_create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request) throws org.apache.thrift.TException + { + create_or_update_wm_mapping_args args = new create_or_update_wm_mapping_args(); + args.setRequest(request); + sendBase("create_or_update_wm_mapping", args); + } + + public WMCreateOrUpdateMappingResponse recv_create_or_update_wm_mapping() throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException + { + create_or_update_wm_mapping_result result = new create_or_update_wm_mapping_result(); + receiveBase(result, "create_or_update_wm_mapping"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + if (result.o4 != null) { + throw result.o4; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "create_or_update_wm_mapping failed: unknown result"); + } + + public WMDropMappingResponse drop_wm_mapping(WMDropMappingRequest request) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException + { + send_drop_wm_mapping(request); + return recv_drop_wm_mapping(); + } + + public void send_drop_wm_mapping(WMDropMappingRequest request) throws org.apache.thrift.TException + { + 
drop_wm_mapping_args args = new drop_wm_mapping_args(); + args.setRequest(request); + sendBase("drop_wm_mapping", args); + } + + public WMDropMappingResponse recv_drop_wm_mapping() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException + { + drop_wm_mapping_result result = new drop_wm_mapping_result(); + receiveBase(result, "drop_wm_mapping"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "drop_wm_mapping failed: unknown result"); + } + + public WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException + { + send_create_or_drop_wm_trigger_to_pool_mapping(request); + return recv_create_or_drop_wm_trigger_to_pool_mapping(); + } + + public void send_create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request) throws org.apache.thrift.TException + { + create_or_drop_wm_trigger_to_pool_mapping_args args = new create_or_drop_wm_trigger_to_pool_mapping_args(); + args.setRequest(request); + sendBase("create_or_drop_wm_trigger_to_pool_mapping", args); + } + + public WMCreateOrDropTriggerToPoolMappingResponse recv_create_or_drop_wm_trigger_to_pool_mapping() throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException + { + create_or_drop_wm_trigger_to_pool_mapping_result result = new create_or_drop_wm_trigger_to_pool_mapping_result(); + receiveBase(result, "create_or_drop_wm_trigger_to_pool_mapping"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + if (result.o4 != null) { + throw result.o4; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "create_or_drop_wm_trigger_to_pool_mapping failed: unknown result"); + } + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class AsyncClient extends com.facebook.fb303.FacebookService.AsyncClient implements AsyncIface { @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { @@ -11791,6 +11980,166 @@ public WMGetTriggersForResourePlanResponse getResult() throws NoSuchObjectExcept } } + public void create_or_update_wm_pool(WMCreateOrUpdatePoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + create_or_update_wm_pool_call method_call = new create_or_update_wm_pool_call(request, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_update_wm_pool_call extends org.apache.thrift.async.TAsyncMethodCall { + 
private WMCreateOrUpdatePoolRequest request; + public create_or_update_wm_pool_call(WMCreateOrUpdatePoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.request = request; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("create_or_update_wm_pool", org.apache.thrift.protocol.TMessageType.CALL, 0)); + create_or_update_wm_pool_args args = new create_or_update_wm_pool_args(); + args.setRequest(request); + args.write(prot); + prot.writeMessageEnd(); + } + + public WMCreateOrUpdatePoolResponse getResult() throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_create_or_update_wm_pool(); + } + } + + public void drop_wm_pool(WMDropPoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + drop_wm_pool_call method_call = new drop_wm_pool_call(request, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_pool_call extends org.apache.thrift.async.TAsyncMethodCall { + private WMDropPoolRequest request; + public drop_wm_pool_call(WMDropPoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.request = request; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("drop_wm_pool", org.apache.thrift.protocol.TMessageType.CALL, 0)); + drop_wm_pool_args args = new drop_wm_pool_args(); + args.setRequest(request); + args.write(prot); + prot.writeMessageEnd(); + } + + public WMDropPoolResponse getResult() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_drop_wm_pool(); + } + } + + 
public void create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + create_or_update_wm_mapping_call method_call = new create_or_update_wm_mapping_call(request, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_update_wm_mapping_call extends org.apache.thrift.async.TAsyncMethodCall { + private WMCreateOrUpdateMappingRequest request; + public create_or_update_wm_mapping_call(WMCreateOrUpdateMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.request = request; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("create_or_update_wm_mapping", org.apache.thrift.protocol.TMessageType.CALL, 0)); + create_or_update_wm_mapping_args args = new create_or_update_wm_mapping_args(); + args.setRequest(request); + args.write(prot); + prot.writeMessageEnd(); + } + + public WMCreateOrUpdateMappingResponse getResult() throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_create_or_update_wm_mapping(); + } + } + + public void drop_wm_mapping(WMDropMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + drop_wm_mapping_call method_call = new drop_wm_mapping_call(request, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_mapping_call extends org.apache.thrift.async.TAsyncMethodCall { + private WMDropMappingRequest request; + public drop_wm_mapping_call(WMDropMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.request = request; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("drop_wm_mapping", org.apache.thrift.protocol.TMessageType.CALL, 0)); + drop_wm_mapping_args args = new 
drop_wm_mapping_args(); + args.setRequest(request); + args.write(prot); + prot.writeMessageEnd(); + } + + public WMDropMappingResponse getResult() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_drop_wm_mapping(); + } + } + + public void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + create_or_drop_wm_trigger_to_pool_mapping_call method_call = new create_or_drop_wm_trigger_to_pool_mapping_call(request, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_drop_wm_trigger_to_pool_mapping_call extends org.apache.thrift.async.TAsyncMethodCall { + private WMCreateOrDropTriggerToPoolMappingRequest request; + public create_or_drop_wm_trigger_to_pool_mapping_call(WMCreateOrDropTriggerToPoolMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.request = request; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("create_or_drop_wm_trigger_to_pool_mapping", org.apache.thrift.protocol.TMessageType.CALL, 0)); + create_or_drop_wm_trigger_to_pool_mapping_args args = new create_or_drop_wm_trigger_to_pool_mapping_args(); + args.setRequest(request); + args.write(prot); + prot.writeMessageEnd(); + } + + public WMCreateOrDropTriggerToPoolMappingResponse getResult() throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_create_or_drop_wm_trigger_to_pool_mapping(); + } + } + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Processor extends com.facebook.fb303.FacebookService.Processor implements org.apache.thrift.TProcessor { @@ -11977,6 +12326,11 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public create_or_update_wm_pool() { + super("create_or_update_wm_pool"); + } + + public 
create_or_update_wm_pool_args getEmptyArgsInstance() { + return new create_or_update_wm_pool_args(); + } + + protected boolean isOneway() { + return false; + } + + public create_or_update_wm_pool_result getResult(I iface, create_or_update_wm_pool_args args) throws org.apache.thrift.TException { + create_or_update_wm_pool_result result = new create_or_update_wm_pool_result(); + try { + result.success = iface.create_or_update_wm_pool(args.request); + } catch (AlreadyExistsException o1) { + result.o1 = o1; + } catch (NoSuchObjectException o2) { + result.o2 = o2; + } catch (InvalidObjectException o3) { + result.o3 = o3; + } catch (MetaException o4) { + result.o4 = o4; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_pool extends org.apache.thrift.ProcessFunction { + public drop_wm_pool() { + super("drop_wm_pool"); + } + + public drop_wm_pool_args getEmptyArgsInstance() { + return new drop_wm_pool_args(); + } + + protected boolean isOneway() { + return false; + } + + public drop_wm_pool_result getResult(I iface, drop_wm_pool_args args) throws org.apache.thrift.TException { + drop_wm_pool_result result = new drop_wm_pool_result(); + try { + result.success = iface.drop_wm_pool(args.request); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (InvalidOperationException o2) { + result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_update_wm_mapping extends org.apache.thrift.ProcessFunction { + public create_or_update_wm_mapping() { + super("create_or_update_wm_mapping"); + } + + public create_or_update_wm_mapping_args getEmptyArgsInstance() { + return new create_or_update_wm_mapping_args(); + } + + protected boolean isOneway() { + return false; + } + + public create_or_update_wm_mapping_result getResult(I iface, create_or_update_wm_mapping_args args) throws org.apache.thrift.TException { + create_or_update_wm_mapping_result result = new create_or_update_wm_mapping_result(); + try { + result.success = iface.create_or_update_wm_mapping(args.request); + } catch (AlreadyExistsException o1) { + result.o1 = o1; + } catch (NoSuchObjectException o2) { + result.o2 = o2; + } catch (InvalidObjectException o3) { + result.o3 = o3; + } catch (MetaException o4) { + result.o4 = o4; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_mapping extends org.apache.thrift.ProcessFunction { + public drop_wm_mapping() { + super("drop_wm_mapping"); + } + + public drop_wm_mapping_args getEmptyArgsInstance() { + return new drop_wm_mapping_args(); + } + + protected boolean isOneway() { + return false; + } + + public drop_wm_mapping_result getResult(I iface, drop_wm_mapping_args args) throws org.apache.thrift.TException { + drop_wm_mapping_result result = new drop_wm_mapping_result(); + try { + result.success = iface.drop_wm_mapping(args.request); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (InvalidOperationException o2) { + result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public 
static class create_or_drop_wm_trigger_to_pool_mapping extends org.apache.thrift.ProcessFunction { + public create_or_drop_wm_trigger_to_pool_mapping() { + super("create_or_drop_wm_trigger_to_pool_mapping"); + } + + public create_or_drop_wm_trigger_to_pool_mapping_args getEmptyArgsInstance() { + return new create_or_drop_wm_trigger_to_pool_mapping_args(); + } + + protected boolean isOneway() { + return false; + } + + public create_or_drop_wm_trigger_to_pool_mapping_result getResult(I iface, create_or_drop_wm_trigger_to_pool_mapping_args args) throws org.apache.thrift.TException { + create_or_drop_wm_trigger_to_pool_mapping_result result = new create_or_drop_wm_trigger_to_pool_mapping_result(); + try { + result.success = iface.create_or_drop_wm_trigger_to_pool_mapping(args.request); + } catch (AlreadyExistsException o1) { + result.o1 = o1; + } catch (NoSuchObjectException o2) { + result.o2 = o2; + } catch (InvalidObjectException o3) { + result.o3 = o3; + } catch (MetaException o4) { + result.o4 = o4; + } + return result; + } + } + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class AsyncProcessor extends com.facebook.fb303.FacebookService.AsyncProcessor { @@ -16582,6 +17082,11 @@ protected AsyncProcessor(I iface, Map extends org.apache.thrift.AsyncProcessFunction { + public create_or_update_wm_pool() { + super("create_or_update_wm_pool"); + } + + public create_or_update_wm_pool_args getEmptyArgsInstance() { + return new create_or_update_wm_pool_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(WMCreateOrUpdatePoolResponse o) { + create_or_update_wm_pool_result result = new create_or_update_wm_pool_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + create_or_update_wm_pool_result result = new create_or_update_wm_pool_result(); + if (e instanceof AlreadyExistsException) { + result.o1 = (AlreadyExistsException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof InvalidObjectException) { + result.o3 = (InvalidObjectException) e; + result.setO3IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o4 = (MetaException) e; + result.setO4IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, create_or_update_wm_pool_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + 
iface.create_or_update_wm_pool(args.request,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_pool extends org.apache.thrift.AsyncProcessFunction { + public drop_wm_pool() { + super("drop_wm_pool"); + } + + public drop_wm_pool_args getEmptyArgsInstance() { + return new drop_wm_pool_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(WMDropPoolResponse o) { + drop_wm_pool_result result = new drop_wm_pool_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + drop_wm_pool_result result = new drop_wm_pool_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof InvalidOperationException) { + result.o2 = (InvalidOperationException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; + result.setO3IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, drop_wm_pool_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.drop_wm_pool(args.request,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_update_wm_mapping extends org.apache.thrift.AsyncProcessFunction { + public create_or_update_wm_mapping() { + super("create_or_update_wm_mapping"); + } + + public create_or_update_wm_mapping_args getEmptyArgsInstance() { + return new create_or_update_wm_mapping_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(WMCreateOrUpdateMappingResponse o) { + create_or_update_wm_mapping_result result = new create_or_update_wm_mapping_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + create_or_update_wm_mapping_result result = new create_or_update_wm_mapping_result(); + if (e instanceof AlreadyExistsException) { + result.o1 = (AlreadyExistsException) e; + result.setO1IsSet(true); + msg 
= result; + } + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof InvalidObjectException) { + result.o3 = (InvalidObjectException) e; + result.setO3IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o4 = (MetaException) e; + result.setO4IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, create_or_update_wm_mapping_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.create_or_update_wm_mapping(args.request,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_mapping extends org.apache.thrift.AsyncProcessFunction { + public drop_wm_mapping() { + super("drop_wm_mapping"); + } + + public drop_wm_mapping_args getEmptyArgsInstance() { + return new drop_wm_mapping_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(WMDropMappingResponse o) { + drop_wm_mapping_result result = new drop_wm_mapping_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + drop_wm_mapping_result result = new drop_wm_mapping_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof InvalidOperationException) { + result.o2 = (InvalidOperationException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; + result.setO3IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, drop_wm_mapping_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.drop_wm_mapping(args.request,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_drop_wm_trigger_to_pool_mapping extends org.apache.thrift.AsyncProcessFunction { + public 
create_or_drop_wm_trigger_to_pool_mapping() { + super("create_or_drop_wm_trigger_to_pool_mapping"); + } + + public create_or_drop_wm_trigger_to_pool_mapping_args getEmptyArgsInstance() { + return new create_or_drop_wm_trigger_to_pool_mapping_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(WMCreateOrDropTriggerToPoolMappingResponse o) { + create_or_drop_wm_trigger_to_pool_mapping_result result = new create_or_drop_wm_trigger_to_pool_mapping_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + create_or_drop_wm_trigger_to_pool_mapping_result result = new create_or_drop_wm_trigger_to_pool_mapping_result(); + if (e instanceof AlreadyExistsException) { + result.o1 = (AlreadyExistsException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof InvalidObjectException) { + result.o3 = (InvalidObjectException) e; + result.setO3IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o4 = (MetaException) e; + result.setO4IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, create_or_drop_wm_trigger_to_pool_mapping_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.create_or_drop_wm_trigger_to_pool_mapping(args.request,resultHandler); + } + } + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class getMetaConf_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { @@ -191014,7 +191869,4626 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_metastore_db_uuid_result("); + StringBuilder sb = new StringBuilder("get_metastore_db_uuid_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_metastore_db_uuid_resultStandardSchemeFactory implements SchemeFactory { + public get_metastore_db_uuid_resultStandardScheme getScheme() { + return new get_metastore_db_uuid_resultStandardScheme(); + } + } + + private static class get_metastore_db_uuid_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_metastore_db_uuid_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.success = iprot.readString(); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_metastore_db_uuid_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeString(struct.success); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_metastore_db_uuid_resultTupleSchemeFactory implements SchemeFactory { + public get_metastore_db_uuid_resultTupleScheme getScheme() { + return new get_metastore_db_uuid_resultTupleScheme(); + } + } + + private static class get_metastore_db_uuid_resultTupleScheme extends TupleScheme { + 
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_metastore_db_uuid_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetSuccess()) {
+          optionals.set(0);
+        }
+        if (struct.isSetO1()) {
+          optionals.set(1);
+        }
+        oprot.writeBitSet(optionals, 2);
+        if (struct.isSetSuccess()) {
+          oprot.writeString(struct.success);
+        }
+        if (struct.isSetO1()) {
+          struct.o1.write(oprot);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_metastore_db_uuid_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(2);
+        if (incoming.get(0)) {
+          struct.success = iprot.readString();
+          struct.setSuccessIsSet(true);
+        }
+        if (incoming.get(1)) {
+          struct.o1 = new MetaException();
+          struct.o1.read(iprot);
+          struct.setO1IsSet(true);
+        }
+      }
+    }
+
+  }
+
+  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_resource_plan_args implements org.apache.thrift.TBase<create_resource_plan_args, create_resource_plan_args._Fields>, java.io.Serializable, Cloneable, Comparable<create_resource_plan_args> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_resource_plan_args");
+
+    private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new create_resource_plan_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new create_resource_plan_argsTupleSchemeFactory());
+    }
+
+    private WMCreateResourcePlanRequest request; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      REQUEST((short)1, "request");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if its not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // REQUEST
+            return REQUEST;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if its not found.
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateResourcePlanRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_resource_plan_args.class, metaDataMap); + } + + public create_resource_plan_args() { + } + + public create_resource_plan_args( + WMCreateResourcePlanRequest request) + { + this(); + this.request = request; + } + + /** + * Performs a deep copy on other. + */ + public create_resource_plan_args(create_resource_plan_args other) { + if (other.isSetRequest()) { + this.request = new WMCreateResourcePlanRequest(other.request); + } + } + + public create_resource_plan_args deepCopy() { + return new create_resource_plan_args(this); + } + + @Override + public void clear() { + this.request = null; + } + + public WMCreateResourcePlanRequest getRequest() { + return this.request; + } + + public void setRequest(WMCreateResourcePlanRequest request) { + this.request = request; + } + + public void unsetRequest() { + this.request = null; + } + + /** Returns true if field request is set (has been assigned a value) and false otherwise */ + public boolean isSetRequest() { + return this.request != null; + } + + public void setRequestIsSet(boolean value) { + if (!value) { + this.request = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQUEST: + if (value == null) { + unsetRequest(); + } else { + setRequest((WMCreateResourcePlanRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQUEST: + return getRequest(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQUEST: + return isSetRequest(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof create_resource_plan_args) + return this.equals((create_resource_plan_args)that); + return false; + } + + public boolean equals(create_resource_plan_args that) { + if (that == null) + return false; + + boolean this_present_request = true && this.isSetRequest(); + boolean that_present_request = true && that.isSetRequest(); + if (this_present_request || that_present_request) { + if (!(this_present_request && that_present_request)) + return false; + if (!this.request.equals(that.request)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new 
ArrayList(); + + boolean present_request = true && (isSetRequest()); + list.add(present_request); + if (present_request) + list.add(request); + + return list.hashCode(); + } + + @Override + public int compareTo(create_resource_plan_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetRequest()).compareTo(other.isSetRequest()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRequest()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.request, other.request); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("create_resource_plan_args("); + boolean first = true; + + sb.append("request:"); + if (this.request == null) { + sb.append("null"); + } else { + sb.append(this.request); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (request != null) { + request.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class create_resource_plan_argsStandardSchemeFactory implements SchemeFactory { + public create_resource_plan_argsStandardScheme getScheme() { + return new create_resource_plan_argsStandardScheme(); + } + } + + private static class create_resource_plan_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, create_resource_plan_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REQUEST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.request = new WMCreateResourcePlanRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, create_resource_plan_args 
struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.request != null) { + oprot.writeFieldBegin(REQUEST_FIELD_DESC); + struct.request.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class create_resource_plan_argsTupleSchemeFactory implements SchemeFactory { + public create_resource_plan_argsTupleScheme getScheme() { + return new create_resource_plan_argsTupleScheme(); + } + } + + private static class create_resource_plan_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, create_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRequest()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRequest()) { + struct.request.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, create_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.request = new WMCreateResourcePlanRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_resource_plan_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new create_resource_plan_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new create_resource_plan_resultTupleSchemeFactory()); + } + + private WMCreateResourcePlanResponse success; // required + private AlreadyExistsException o1; // required + private InvalidObjectException o2; // required + private MetaException o3; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateResourcePlanResponse.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_resource_plan_result.class, metaDataMap); + } + + public create_resource_plan_result() { + } + + public create_resource_plan_result( + WMCreateResourcePlanResponse success, + AlreadyExistsException o1, + InvalidObjectException o2, + MetaException o3) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + this.o3 = o3; + } + + /** + * Performs a deep copy on other. 
+ */ + public create_resource_plan_result(create_resource_plan_result other) { + if (other.isSetSuccess()) { + this.success = new WMCreateResourcePlanResponse(other.success); + } + if (other.isSetO1()) { + this.o1 = new AlreadyExistsException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); + } + } + + public create_resource_plan_result deepCopy() { + return new create_resource_plan_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + this.o3 = null; + } + + public WMCreateResourcePlanResponse getSuccess() { + return this.success; + } + + public void setSuccess(WMCreateResourcePlanResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public AlreadyExistsException getO1() { + return this.o1; + } + + public void setO1(AlreadyExistsException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public InvalidObjectException getO2() { + return this.o2; + } + + public void setO2(InvalidObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((WMCreateResourcePlanResponse)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((AlreadyExistsException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((InvalidObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + case O3: + return getO3(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + case O3: + return 
isSetO3(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof create_resource_plan_result) + return this.equals((create_resource_plan_result)that); + return false; + } + + public boolean equals(create_resource_plan_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + + return list.hashCode(); + } + + @Override + public int compareTo(create_resource_plan_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return 
_Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("create_resource_plan_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class create_resource_plan_resultStandardSchemeFactory implements SchemeFactory { + public create_resource_plan_resultStandardScheme getScheme() { + return new create_resource_plan_resultStandardScheme(); + } + } + + private static class create_resource_plan_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, create_resource_plan_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new WMCreateResourcePlanResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new AlreadyExistsException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new InvalidObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // O3 
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, create_resource_plan_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class create_resource_plan_resultTupleSchemeFactory implements SchemeFactory { + public create_resource_plan_resultTupleScheme getScheme() { + return new create_resource_plan_resultTupleScheme(); + } + } + + private static class create_resource_plan_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, create_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + optionals.set(2); + } + if (struct.isSetO3()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, create_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(4); + if (incoming.get(0)) { + struct.success = new WMCreateResourcePlanResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new AlreadyExistsException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new InvalidObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + if (incoming.get(3)) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_resource_plan_args"); + + private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private 
static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_resource_plan_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_resource_plan_argsTupleSchemeFactory()); + } + + private WMGetResourcePlanRequest request; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REQUEST((short)1, "request"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REQUEST + return REQUEST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetResourcePlanRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_resource_plan_args.class, metaDataMap); + } + + public get_resource_plan_args() { + } + + public get_resource_plan_args( + WMGetResourcePlanRequest request) + { + this(); + this.request = request; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_resource_plan_args(get_resource_plan_args other) { + if (other.isSetRequest()) { + this.request = new WMGetResourcePlanRequest(other.request); + } + } + + public get_resource_plan_args deepCopy() { + return new get_resource_plan_args(this); + } + + @Override + public void clear() { + this.request = null; + } + + public WMGetResourcePlanRequest getRequest() { + return this.request; + } + + public void setRequest(WMGetResourcePlanRequest request) { + this.request = request; + } + + public void unsetRequest() { + this.request = null; + } + + /** Returns true if field request is set (has been assigned a value) and false otherwise */ + public boolean isSetRequest() { + return this.request != null; + } + + public void setRequestIsSet(boolean value) { + if (!value) { + this.request = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQUEST: + if (value == null) { + unsetRequest(); + } else { + setRequest((WMGetResourcePlanRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQUEST: + return getRequest(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQUEST: + return isSetRequest(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_resource_plan_args) + return this.equals((get_resource_plan_args)that); + return false; + } + + public boolean equals(get_resource_plan_args that) { + if (that == null) + return false; + + boolean this_present_request = true && this.isSetRequest(); + boolean that_present_request = true && that.isSetRequest(); + if (this_present_request || that_present_request) { + if (!(this_present_request && that_present_request)) + return false; + if (!this.request.equals(that.request)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_request = true && (isSetRequest()); + list.add(present_request); + if (present_request) + list.add(request); + + return list.hashCode(); + } + + @Override + public int compareTo(get_resource_plan_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetRequest()).compareTo(other.isSetRequest()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRequest()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.request, other.request); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_resource_plan_args("); + boolean first = true; + + sb.append("request:"); + if (this.request == 
null) { + sb.append("null"); + } else { + sb.append(this.request); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (request != null) { + request.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_resource_plan_argsStandardSchemeFactory implements SchemeFactory { + public get_resource_plan_argsStandardScheme getScheme() { + return new get_resource_plan_argsStandardScheme(); + } + } + + private static class get_resource_plan_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_resource_plan_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REQUEST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.request = new WMGetResourcePlanRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_resource_plan_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.request != null) { + oprot.writeFieldBegin(REQUEST_FIELD_DESC); + struct.request.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_resource_plan_argsTupleSchemeFactory implements SchemeFactory { + public get_resource_plan_argsTupleScheme getScheme() { + return new get_resource_plan_argsTupleScheme(); + } + } + + private static class get_resource_plan_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRequest()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRequest()) { + struct.request.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.request = new WMGetResourcePlanRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } + } + } 
+ + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_resource_plan_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_resource_plan_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_resource_plan_resultTupleSchemeFactory()); + } + + private WMGetResourcePlanResponse success; // required + private NoSuchObjectException o1; // required + private MetaException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetResourcePlanResponse.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_resource_plan_result.class, metaDataMap); + } + + public get_resource_plan_result() { + } + + public get_resource_plan_result( + WMGetResourcePlanResponse success, + NoSuchObjectException o1, + MetaException o2) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_resource_plan_result(get_resource_plan_result other) { + if (other.isSetSuccess()) { + this.success = new WMGetResourcePlanResponse(other.success); + } + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + } + + public get_resource_plan_result deepCopy() { + return new get_resource_plan_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + } + + public WMGetResourcePlanResponse getSuccess() { + return this.success; + } + + public void setSuccess(WMGetResourcePlanResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((WMGetResourcePlanResponse)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_resource_plan_result) + return this.equals((get_resource_plan_result)that); + return false; + } + + public boolean equals(get_resource_plan_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); 
+ boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(get_resource_plan_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_resource_plan_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch 
(org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_resource_plan_resultStandardSchemeFactory implements SchemeFactory { + public get_resource_plan_resultStandardScheme getScheme() { + return new get_resource_plan_resultStandardScheme(); + } + } + + private static class get_resource_plan_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_resource_plan_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new WMGetResourcePlanResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_resource_plan_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_resource_plan_resultTupleSchemeFactory implements SchemeFactory { + public get_resource_plan_resultTupleScheme getScheme() { + return new get_resource_plan_resultTupleScheme(); + } + } + + private static class get_resource_plan_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + if 
(struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.success = new WMGetResourcePlanResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_active_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_active_resource_plan_args"); + + private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_active_resource_plan_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_active_resource_plan_argsTupleSchemeFactory()); + } + + private WMGetActiveResourcePlanRequest request; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REQUEST((short)1, "request"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REQUEST + return REQUEST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetActiveResourcePlanRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_active_resource_plan_args.class, metaDataMap); + } + + public get_active_resource_plan_args() { + } + + public get_active_resource_plan_args( + WMGetActiveResourcePlanRequest request) + { + this(); + this.request = request; + } + + /** + * Performs a deep copy on other. + */ + public get_active_resource_plan_args(get_active_resource_plan_args other) { + if (other.isSetRequest()) { + this.request = new WMGetActiveResourcePlanRequest(other.request); + } + } + + public get_active_resource_plan_args deepCopy() { + return new get_active_resource_plan_args(this); + } + + @Override + public void clear() { + this.request = null; + } + + public WMGetActiveResourcePlanRequest getRequest() { + return this.request; + } + + public void setRequest(WMGetActiveResourcePlanRequest request) { + this.request = request; + } + + public void unsetRequest() { + this.request = null; + } + + /** Returns true if field request is set (has been assigned a value) and false otherwise */ + public boolean isSetRequest() { + return this.request != null; + } + + public void setRequestIsSet(boolean value) { + if (!value) { + this.request = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQUEST: + if (value == null) { + unsetRequest(); + } else { + setRequest((WMGetActiveResourcePlanRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQUEST: + return getRequest(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQUEST: + return isSetRequest(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_active_resource_plan_args) + return this.equals((get_active_resource_plan_args)that); + return false; + } + + public boolean equals(get_active_resource_plan_args that) { + if (that == null) + return false; + + boolean this_present_request = true && this.isSetRequest(); + boolean that_present_request = true && that.isSetRequest(); + if (this_present_request || that_present_request) { + if (!(this_present_request && that_present_request)) + return false; + if (!this.request.equals(that.request)) + return false; + } + + return true; + } + + 
@Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_request = true && (isSetRequest()); + list.add(present_request); + if (present_request) + list.add(request); + + return list.hashCode(); + } + + @Override + public int compareTo(get_active_resource_plan_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetRequest()).compareTo(other.isSetRequest()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRequest()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.request, other.request); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_active_resource_plan_args("); + boolean first = true; + + sb.append("request:"); + if (this.request == null) { + sb.append("null"); + } else { + sb.append(this.request); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (request != null) { + request.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_active_resource_plan_argsStandardSchemeFactory implements SchemeFactory { + public get_active_resource_plan_argsStandardScheme getScheme() { + return new get_active_resource_plan_argsStandardScheme(); + } + } + + private static class get_active_resource_plan_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_active_resource_plan_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REQUEST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.request = new WMGetActiveResourcePlanRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + 
public void write(org.apache.thrift.protocol.TProtocol oprot, get_active_resource_plan_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.request != null) { + oprot.writeFieldBegin(REQUEST_FIELD_DESC); + struct.request.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_active_resource_plan_argsTupleSchemeFactory implements SchemeFactory { + public get_active_resource_plan_argsTupleScheme getScheme() { + return new get_active_resource_plan_argsTupleScheme(); + } + } + + private static class get_active_resource_plan_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_active_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRequest()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRequest()) { + struct.request.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_active_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.request = new WMGetActiveResourcePlanRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_active_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_active_resource_plan_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_active_resource_plan_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_active_resource_plan_resultTupleSchemeFactory()); + } + + private WMGetActiveResourcePlanResponse success; // required + private MetaException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O2((short)1, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetActiveResourcePlanResponse.class))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_active_resource_plan_result.class, metaDataMap); + } + + public get_active_resource_plan_result() { + } + + public get_active_resource_plan_result( + WMGetActiveResourcePlanResponse success, + MetaException o2) + { + this(); + this.success = success; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_active_resource_plan_result(get_active_resource_plan_result other) { + if (other.isSetSuccess()) { + this.success = new WMGetActiveResourcePlanResponse(other.success); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + } + + public get_active_resource_plan_result deepCopy() { + return new get_active_resource_plan_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o2 = null; + } + + public WMGetActiveResourcePlanResponse getSuccess() { + return this.success; + } + + public void setSuccess(WMGetActiveResourcePlanResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((WMGetActiveResourcePlanResponse)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_active_resource_plan_result) + return this.equals((get_active_resource_plan_result)that); + return false; + } + + public boolean equals(get_active_resource_plan_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int 
compareTo(get_active_resource_plan_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_active_resource_plan_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_active_resource_plan_resultStandardSchemeFactory implements SchemeFactory { + public get_active_resource_plan_resultStandardScheme getScheme() { + return new get_active_resource_plan_resultStandardScheme(); + } + } + + private static class get_active_resource_plan_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_active_resource_plan_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new WMGetActiveResourcePlanResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); + } + break; + case 1: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_active_resource_plan_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_active_resource_plan_resultTupleSchemeFactory implements SchemeFactory { + public get_active_resource_plan_resultTupleScheme getScheme() { + return new get_active_resource_plan_resultTupleScheme(); + } + } + + private static class get_active_resource_plan_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_active_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO2()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_active_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.success = new WMGetActiveResourcePlanResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_all_resource_plans_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_all_resource_plans_args"); + + private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_all_resource_plans_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_all_resource_plans_argsTupleSchemeFactory()); + } + + private WMGetAllResourcePlanRequest request; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REQUEST((short)1, "request"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REQUEST + return REQUEST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetAllResourcePlanRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_all_resource_plans_args.class, metaDataMap); + } + + public get_all_resource_plans_args() { + } + + public get_all_resource_plans_args( + WMGetAllResourcePlanRequest request) + { + this(); + this.request = request; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_all_resource_plans_args(get_all_resource_plans_args other) { + if (other.isSetRequest()) { + this.request = new WMGetAllResourcePlanRequest(other.request); + } + } + + public get_all_resource_plans_args deepCopy() { + return new get_all_resource_plans_args(this); + } + + @Override + public void clear() { + this.request = null; + } + + public WMGetAllResourcePlanRequest getRequest() { + return this.request; + } + + public void setRequest(WMGetAllResourcePlanRequest request) { + this.request = request; + } + + public void unsetRequest() { + this.request = null; + } + + /** Returns true if field request is set (has been assigned a value) and false otherwise */ + public boolean isSetRequest() { + return this.request != null; + } + + public void setRequestIsSet(boolean value) { + if (!value) { + this.request = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQUEST: + if (value == null) { + unsetRequest(); + } else { + setRequest((WMGetAllResourcePlanRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQUEST: + return getRequest(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQUEST: + return isSetRequest(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_all_resource_plans_args) + return this.equals((get_all_resource_plans_args)that); + return false; + } + + public boolean equals(get_all_resource_plans_args that) { + if (that == null) + return false; + + boolean this_present_request = true && this.isSetRequest(); + boolean that_present_request = true && that.isSetRequest(); + if (this_present_request || that_present_request) { + if (!(this_present_request && that_present_request)) + return false; + if (!this.request.equals(that.request)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_request = true && (isSetRequest()); + list.add(present_request); + if (present_request) + list.add(request); + + return list.hashCode(); + } + + @Override + public int compareTo(get_all_resource_plans_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetRequest()).compareTo(other.isSetRequest()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRequest()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.request, other.request); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_all_resource_plans_args("); + boolean first 
= true; + + sb.append("request:"); + if (this.request == null) { + sb.append("null"); + } else { + sb.append(this.request); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (request != null) { + request.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_all_resource_plans_argsStandardSchemeFactory implements SchemeFactory { + public get_all_resource_plans_argsStandardScheme getScheme() { + return new get_all_resource_plans_argsStandardScheme(); + } + } + + private static class get_all_resource_plans_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_resource_plans_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REQUEST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.request = new WMGetAllResourcePlanRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_resource_plans_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.request != null) { + oprot.writeFieldBegin(REQUEST_FIELD_DESC); + struct.request.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_all_resource_plans_argsTupleSchemeFactory implements SchemeFactory { + public get_all_resource_plans_argsTupleScheme getScheme() { + return new get_all_resource_plans_argsTupleScheme(); + } + } + + private static class get_all_resource_plans_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_all_resource_plans_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRequest()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRequest()) { + struct.request.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_all_resource_plans_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + 
struct.request = new WMGetAllResourcePlanRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_all_resource_plans_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_all_resource_plans_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_all_resource_plans_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_all_resource_plans_resultTupleSchemeFactory()); + } + + private WMGetAllResourcePlanResponse success; // required + private MetaException o1; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetAllResourcePlanResponse.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_all_resource_plans_result.class, metaDataMap); + } + + public get_all_resource_plans_result() { + } + + public get_all_resource_plans_result( + WMGetAllResourcePlanResponse success, + MetaException o1) + { + this(); + this.success = success; + this.o1 = o1; + } + + /** + * Performs a deep copy on other. + */ + public get_all_resource_plans_result(get_all_resource_plans_result other) { + if (other.isSetSuccess()) { + this.success = new WMGetAllResourcePlanResponse(other.success); + } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + } + + public get_all_resource_plans_result deepCopy() { + return new get_all_resource_plans_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + } + + public WMGetAllResourcePlanResponse getSuccess() { + return this.success; + } + + public void setSuccess(WMGetAllResourcePlanResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((WMGetAllResourcePlanResponse)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + 
public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_all_resource_plans_result) + return this.equals((get_all_resource_plans_result)that); + return false; + } + + public boolean equals(get_all_resource_plans_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + return list.hashCode(); + } + + @Override + public int compareTo(get_all_resource_plans_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_all_resource_plans_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new 
org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_all_resource_plans_resultStandardSchemeFactory implements SchemeFactory { + public get_all_resource_plans_resultStandardScheme getScheme() { + return new get_all_resource_plans_resultStandardScheme(); + } + } + + private static class get_all_resource_plans_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_resource_plans_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new WMGetAllResourcePlanResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_resource_plans_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_all_resource_plans_resultTupleSchemeFactory implements SchemeFactory { + public get_all_resource_plans_resultTupleScheme getScheme() { + return new get_all_resource_plans_resultTupleScheme(); + } + } + + private static class get_all_resource_plans_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_all_resource_plans_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_all_resource_plans_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = 
iprot.readBitSet(2); + if (incoming.get(0)) { + struct.success = new WMGetAllResourcePlanResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_resource_plan_args"); + + private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new alter_resource_plan_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_resource_plan_argsTupleSchemeFactory()); + } + + private WMAlterResourcePlanRequest request; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REQUEST((short)1, "request"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REQUEST + return REQUEST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMAlterResourcePlanRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_resource_plan_args.class, metaDataMap); + } + + public alter_resource_plan_args() { + } + + public alter_resource_plan_args( + WMAlterResourcePlanRequest request) + { + this(); + this.request = request; + } + + /** + * Performs a deep copy on other. 
+ */ + public alter_resource_plan_args(alter_resource_plan_args other) { + if (other.isSetRequest()) { + this.request = new WMAlterResourcePlanRequest(other.request); + } + } + + public alter_resource_plan_args deepCopy() { + return new alter_resource_plan_args(this); + } + + @Override + public void clear() { + this.request = null; + } + + public WMAlterResourcePlanRequest getRequest() { + return this.request; + } + + public void setRequest(WMAlterResourcePlanRequest request) { + this.request = request; + } + + public void unsetRequest() { + this.request = null; + } + + /** Returns true if field request is set (has been assigned a value) and false otherwise */ + public boolean isSetRequest() { + return this.request != null; + } + + public void setRequestIsSet(boolean value) { + if (!value) { + this.request = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQUEST: + if (value == null) { + unsetRequest(); + } else { + setRequest((WMAlterResourcePlanRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQUEST: + return getRequest(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQUEST: + return isSetRequest(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof alter_resource_plan_args) + return this.equals((alter_resource_plan_args)that); + return false; + } + + public boolean equals(alter_resource_plan_args that) { + if (that == null) + return false; + + boolean this_present_request = true && this.isSetRequest(); + boolean that_present_request = true && that.isSetRequest(); + if (this_present_request || that_present_request) { + if (!(this_present_request && that_present_request)) + return false; + if (!this.request.equals(that.request)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_request = true && (isSetRequest()); + list.add(present_request); + if (present_request) + list.add(request); + + return list.hashCode(); + } + + @Override + public int compareTo(alter_resource_plan_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetRequest()).compareTo(other.isSetRequest()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRequest()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.request, other.request); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("alter_resource_plan_args("); + boolean first = true; + + 
sb.append("request:"); + if (this.request == null) { + sb.append("null"); + } else { + sb.append(this.request); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (request != null) { + request.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class alter_resource_plan_argsStandardSchemeFactory implements SchemeFactory { + public alter_resource_plan_argsStandardScheme getScheme() { + return new alter_resource_plan_argsStandardScheme(); + } + } + + private static class alter_resource_plan_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_resource_plan_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REQUEST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.request = new WMAlterResourcePlanRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_resource_plan_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.request != null) { + oprot.writeFieldBegin(REQUEST_FIELD_DESC); + struct.request.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class alter_resource_plan_argsTupleSchemeFactory implements SchemeFactory { + public alter_resource_plan_argsTupleScheme getScheme() { + return new alter_resource_plan_argsTupleScheme(); + } + } + + private static class alter_resource_plan_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRequest()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRequest()) { + struct.request.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.request = new WMAlterResourcePlanRequest(); + 
struct.request.read(iprot); + struct.setRequestIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_resource_plan_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new alter_resource_plan_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_resource_plan_resultTupleSchemeFactory()); + } + + private WMAlterResourcePlanResponse success; // required + private NoSuchObjectException o1; // required + private InvalidOperationException o2; // required + private MetaException o3; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMAlterResourcePlanResponse.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_resource_plan_result.class, metaDataMap); + } + + public alter_resource_plan_result() { + } + + public alter_resource_plan_result( + WMAlterResourcePlanResponse success, + NoSuchObjectException o1, + InvalidOperationException o2, + MetaException o3) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + this.o3 = o3; + } + + /** + * Performs a deep copy on other. 
+ */ + public alter_resource_plan_result(alter_resource_plan_result other) { + if (other.isSetSuccess()) { + this.success = new WMAlterResourcePlanResponse(other.success); + } + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidOperationException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); + } + } + + public alter_resource_plan_result deepCopy() { + return new alter_resource_plan_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + this.o3 = null; + } + + public WMAlterResourcePlanResponse getSuccess() { + return this.success; + } + + public void setSuccess(WMAlterResourcePlanResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public InvalidOperationException getO2() { + return this.o2; + } + + public void setO2(InvalidOperationException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((WMAlterResourcePlanResponse)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((InvalidOperationException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + case O3: + return getO3(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + case O3: + return 
isSetO3(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof alter_resource_plan_result) + return this.equals((alter_resource_plan_result)that); + return false; + } + + public boolean equals(alter_resource_plan_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + + return list.hashCode(); + } + + @Override + public int compareTo(alter_resource_plan_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + 
} + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("alter_resource_plan_result("); boolean first = true; sb.append("success:"); @@ -191032,6 +196506,22 @@ public String toString() { sb.append(this.o1); } first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; sb.append(")"); return sb.toString(); } @@ -191039,6 +196529,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -191057,15 +196550,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_metastore_db_uuid_resultStandardSchemeFactory implements SchemeFactory { - public get_metastore_db_uuid_resultStandardScheme getScheme() { - return new get_metastore_db_uuid_resultStandardScheme(); + private static class alter_resource_plan_resultStandardSchemeFactory implements SchemeFactory { + public alter_resource_plan_resultStandardScheme getScheme() { + return new alter_resource_plan_resultStandardScheme(); } } - private static class get_metastore_db_uuid_resultStandardScheme extends StandardScheme { + private static class alter_resource_plan_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_metastore_db_uuid_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_resource_plan_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -191076,8 +196569,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_metastore_db_uu } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.success = iprot.readString(); + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new WMAlterResourcePlanResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -191085,13 +196579,31 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_metastore_db_uu break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new MetaException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new InvalidOperationException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); + } + break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -191101,13 +196613,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_metastore_db_uu struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_metastore_db_uuid_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_resource_plan_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeString(struct.success); + struct.success.write(oprot); oprot.writeFieldEnd(); } if (struct.o1 != null) { @@ -191115,22 +196627,32 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_metastore_db_u struct.o1.write(oprot); oprot.writeFieldEnd(); } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_metastore_db_uuid_resultTupleSchemeFactory implements SchemeFactory { - public get_metastore_db_uuid_resultTupleScheme getScheme() { - return new get_metastore_db_uuid_resultTupleScheme(); + private static class alter_resource_plan_resultTupleSchemeFactory implements SchemeFactory { + public alter_resource_plan_resultTupleScheme getScheme() { + return new alter_resource_plan_resultTupleScheme(); } } - private static class get_metastore_db_uuid_resultTupleScheme extends TupleScheme { + private static class alter_resource_plan_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_metastore_db_uuid_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -191139,45 +196661,68 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_metastore_db_uu if (struct.isSetO1()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetO2()) { + optionals.set(2); + } + if (struct.isSetO3()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetSuccess()) { - oprot.writeString(struct.success); + struct.success.write(oprot); } if (struct.isSetO1()) { struct.o1.write(oprot); } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_metastore_db_uuid_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { - struct.success = iprot.readString(); + 
struct.success = new WMAlterResourcePlanResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o1 = new MetaException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } + if (incoming.get(2)) { + struct.o2 = new InvalidOperationException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + if (incoming.get(3)) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_resource_plan_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class validate_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("validate_resource_plan_args"); private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new create_resource_plan_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new create_resource_plan_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new validate_resource_plan_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new validate_resource_plan_argsTupleSchemeFactory()); } - private WMCreateResourcePlanRequest request; // required + private WMValidateResourcePlanRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -191242,16 +196787,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateResourcePlanRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMValidateResourcePlanRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_resource_plan_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(validate_resource_plan_args.class, metaDataMap); } - public create_resource_plan_args() { + public validate_resource_plan_args() { } - public create_resource_plan_args( - WMCreateResourcePlanRequest request) + public validate_resource_plan_args( + WMValidateResourcePlanRequest request) { this(); this.request = request; @@ -191260,14 +196805,14 @@ public create_resource_plan_args( /** * Performs a deep copy on other. 
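
For orientation: the generated alter_resource_plan plumbing above follows the usual Thrift pattern, an _args struct wrapping a single WMAlterResourcePlanRequest (field 1) and a _result struct carrying WMAlterResourcePlanResponse as success plus the three declared exceptions (o1 = NoSuchObjectException, o2 = InvalidOperationException, o3 = MetaException). Below is a minimal, illustrative sketch (not part of the patch) of driving that call from a raw Thrift client; the endpoint and the request setter names are assumptions, since the request/response structs are defined elsewhere in this change.

import org.apache.hadoop.hive.metastore.api.*;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class AlterResourcePlanExample {
  public static void main(String[] args) throws Exception {
    // Assumed metastore Thrift endpoint; adjust host/port as needed.
    TTransport transport = new TSocket("localhost", 9083);
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
    try {
      WMAlterResourcePlanRequest request = new WMAlterResourcePlanRequest();
      request.setResourcePlanName("rp");                  // assumed setter
      request.setResourcePlan(new WMResourcePlan("rp"));  // assumed setter
      // alter_resource_plan_args wraps this request; the result struct surfaces
      // WMAlterResourcePlanResponse as the return value and o1/o2/o3 as exceptions.
      WMAlterResourcePlanResponse response = client.alter_resource_plan(request);
      System.out.println("altered: " + response);
    } catch (NoSuchObjectException | InvalidOperationException | MetaException e) {
      System.err.println("alter_resource_plan failed: " + e);
    } finally {
      transport.close();
    }
  }
}
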
*/ - public create_resource_plan_args(create_resource_plan_args other) { + public validate_resource_plan_args(validate_resource_plan_args other) { if (other.isSetRequest()) { - this.request = new WMCreateResourcePlanRequest(other.request); + this.request = new WMValidateResourcePlanRequest(other.request); } } - public create_resource_plan_args deepCopy() { - return new create_resource_plan_args(this); + public validate_resource_plan_args deepCopy() { + return new validate_resource_plan_args(this); } @Override @@ -191275,11 +196820,11 @@ public void clear() { this.request = null; } - public WMCreateResourcePlanRequest getRequest() { + public WMValidateResourcePlanRequest getRequest() { return this.request; } - public void setRequest(WMCreateResourcePlanRequest request) { + public void setRequest(WMValidateResourcePlanRequest request) { this.request = request; } @@ -191304,7 +196849,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRequest(); } else { - setRequest((WMCreateResourcePlanRequest)value); + setRequest((WMValidateResourcePlanRequest)value); } break; @@ -191337,12 +196882,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof create_resource_plan_args) - return this.equals((create_resource_plan_args)that); + if (that instanceof validate_resource_plan_args) + return this.equals((validate_resource_plan_args)that); return false; } - public boolean equals(create_resource_plan_args that) { + public boolean equals(validate_resource_plan_args that) { if (that == null) return false; @@ -191371,7 +196916,7 @@ public int hashCode() { } @Override - public int compareTo(create_resource_plan_args other) { + public int compareTo(validate_resource_plan_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -191405,7 +196950,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("create_resource_plan_args("); + StringBuilder sb = new StringBuilder("validate_resource_plan_args("); boolean first = true; sb.append("request:"); @@ -191443,15 +196988,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class create_resource_plan_argsStandardSchemeFactory implements SchemeFactory { - public create_resource_plan_argsStandardScheme getScheme() { - return new create_resource_plan_argsStandardScheme(); + private static class validate_resource_plan_argsStandardSchemeFactory implements SchemeFactory { + public validate_resource_plan_argsStandardScheme getScheme() { + return new validate_resource_plan_argsStandardScheme(); } } - private static class create_resource_plan_argsStandardScheme extends StandardScheme { + private static class validate_resource_plan_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, create_resource_plan_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, validate_resource_plan_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -191463,7 +197008,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_resource_pla switch (schemeField.id) { case 1: // REQUEST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.request = new WMCreateResourcePlanRequest(); + struct.request = new WMValidateResourcePlanRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } else { @@ -191479,7 +197024,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_resource_pla struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, create_resource_plan_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, validate_resource_plan_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -191494,16 +197039,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_resource_pl } - private static class create_resource_plan_argsTupleSchemeFactory implements SchemeFactory { - public create_resource_plan_argsTupleScheme getScheme() { - return new create_resource_plan_argsTupleScheme(); + private static class validate_resource_plan_argsTupleSchemeFactory implements SchemeFactory { + public validate_resource_plan_argsTupleScheme getScheme() { + return new validate_resource_plan_argsTupleScheme(); } } - private static class create_resource_plan_argsTupleScheme extends TupleScheme { + private static class validate_resource_plan_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, create_resource_plan_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, validate_resource_plan_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRequest()) { @@ -191516,11 +197061,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_resource_pla } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, create_resource_plan_args struct) throws org.apache.thrift.TException { + public void 
read(org.apache.thrift.protocol.TProtocol prot, validate_resource_plan_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.request = new WMCreateResourcePlanRequest(); + struct.request = new WMValidateResourcePlanRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } @@ -191529,31 +197074,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_resource_plan } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_resource_plan_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class validate_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("validate_resource_plan_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); - private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new create_resource_plan_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new create_resource_plan_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new validate_resource_plan_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new validate_resource_plan_resultTupleSchemeFactory()); } - private WMCreateResourcePlanResponse success; // required - private AlreadyExistsException o1; // required - private InvalidObjectException o2; // required - private MetaException o3; // required + private WMValidateResourcePlanResponse success; // required + private NoSuchObjectException o1; // required + private MetaException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { SUCCESS((short)0, "success"), O1((short)1, "o1"), - O2((short)2, "o2"), - O3((short)3, "o3"); + O2((short)2, "o2"); private static final Map byName = new HashMap(); @@ -191574,8 +197116,6 @@ public static _Fields findByThriftId(int fieldId) { return O1; case 2: // O2 return O2; - case 3: // O3 - return O3; default: return null; } @@ -191620,53 +197160,46 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateResourcePlanResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMValidateResourcePlanResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); - tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_resource_plan_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(validate_resource_plan_result.class, metaDataMap); } - public create_resource_plan_result() { + public validate_resource_plan_result() { } - public create_resource_plan_result( - WMCreateResourcePlanResponse success, - AlreadyExistsException o1, - InvalidObjectException o2, - MetaException o3) + public validate_resource_plan_result( + WMValidateResourcePlanResponse success, + NoSuchObjectException o1, + MetaException o2) { this(); this.success = success; this.o1 = o1; this.o2 = o2; - this.o3 = o3; } /** * Performs a deep copy on other. 
*/ - public create_resource_plan_result(create_resource_plan_result other) { + public validate_resource_plan_result(validate_resource_plan_result other) { if (other.isSetSuccess()) { - this.success = new WMCreateResourcePlanResponse(other.success); + this.success = new WMValidateResourcePlanResponse(other.success); } if (other.isSetO1()) { - this.o1 = new AlreadyExistsException(other.o1); + this.o1 = new NoSuchObjectException(other.o1); } if (other.isSetO2()) { - this.o2 = new InvalidObjectException(other.o2); - } - if (other.isSetO3()) { - this.o3 = new MetaException(other.o3); + this.o2 = new MetaException(other.o2); } } - public create_resource_plan_result deepCopy() { - return new create_resource_plan_result(this); + public validate_resource_plan_result deepCopy() { + return new validate_resource_plan_result(this); } @Override @@ -191674,14 +197207,13 @@ public void clear() { this.success = null; this.o1 = null; this.o2 = null; - this.o3 = null; } - public WMCreateResourcePlanResponse getSuccess() { + public WMValidateResourcePlanResponse getSuccess() { return this.success; } - public void setSuccess(WMCreateResourcePlanResponse success) { + public void setSuccess(WMValidateResourcePlanResponse success) { this.success = success; } @@ -191700,11 +197232,11 @@ public void setSuccessIsSet(boolean value) { } } - public AlreadyExistsException getO1() { + public NoSuchObjectException getO1() { return this.o1; } - public void setO1(AlreadyExistsException o1) { + public void setO1(NoSuchObjectException o1) { this.o1 = o1; } @@ -191723,11 +197255,11 @@ public void setO1IsSet(boolean value) { } } - public InvalidObjectException getO2() { + public MetaException getO2() { return this.o2; } - public void setO2(InvalidObjectException o2) { + public void setO2(MetaException o2) { this.o2 = o2; } @@ -191746,36 +197278,13 @@ public void setO2IsSet(boolean value) { } } - public MetaException getO3() { - return this.o3; - } - - public void setO3(MetaException o3) { - this.o3 = o3; - } - - public void unsetO3() { - this.o3 = null; - } - - /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ - public boolean isSetO3() { - return this.o3 != null; - } - - public void setO3IsSet(boolean value) { - if (!value) { - this.o3 = null; - } - } - public void setFieldValue(_Fields field, Object value) { switch (field) { case SUCCESS: if (value == null) { unsetSuccess(); } else { - setSuccess((WMCreateResourcePlanResponse)value); + setSuccess((WMValidateResourcePlanResponse)value); } break; @@ -191783,7 +197292,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO1(); } else { - setO1((AlreadyExistsException)value); + setO1((NoSuchObjectException)value); } break; @@ -191791,15 +197300,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((InvalidObjectException)value); - } - break; - - case O3: - if (value == null) { - unsetO3(); - } else { - setO3((MetaException)value); + setO2((MetaException)value); } break; @@ -191817,9 +197318,6 @@ public Object getFieldValue(_Fields field) { case O2: return getO2(); - case O3: - return getO3(); - } throw new IllegalStateException(); } @@ -191837,8 +197335,6 @@ public boolean isSet(_Fields field) { return isSetO1(); case O2: return isSetO2(); - case O3: - return isSetO3(); } throw new IllegalStateException(); } @@ -191847,12 +197343,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that 
instanceof create_resource_plan_result) - return this.equals((create_resource_plan_result)that); + if (that instanceof validate_resource_plan_result) + return this.equals((validate_resource_plan_result)that); return false; } - public boolean equals(create_resource_plan_result that) { + public boolean equals(validate_resource_plan_result that) { if (that == null) return false; @@ -191883,15 +197379,6 @@ public boolean equals(create_resource_plan_result that) { return false; } - boolean this_present_o3 = true && this.isSetO3(); - boolean that_present_o3 = true && that.isSetO3(); - if (this_present_o3 || that_present_o3) { - if (!(this_present_o3 && that_present_o3)) - return false; - if (!this.o3.equals(that.o3)) - return false; - } - return true; } @@ -191914,16 +197401,11 @@ public int hashCode() { if (present_o2) list.add(o2); - boolean present_o3 = true && (isSetO3()); - list.add(present_o3); - if (present_o3) - list.add(o3); - return list.hashCode(); } @Override - public int compareTo(create_resource_plan_result other) { + public int compareTo(validate_resource_plan_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -191960,16 +197442,6 @@ public int compareTo(create_resource_plan_result other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetO3()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -191987,7 +197459,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("create_resource_plan_result("); + StringBuilder sb = new StringBuilder("validate_resource_plan_result("); boolean first = true; sb.append("success:"); @@ -192013,14 +197485,6 @@ public String toString() { sb.append(this.o2); } first = false; - if (!first) sb.append(", "); - sb.append("o3:"); - if (this.o3 == null) { - sb.append("null"); - } else { - sb.append(this.o3); - } - first = false; sb.append(")"); return sb.toString(); } @@ -192049,15 +197513,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class create_resource_plan_resultStandardSchemeFactory implements SchemeFactory { - public create_resource_plan_resultStandardScheme getScheme() { - return new create_resource_plan_resultStandardScheme(); + private static class validate_resource_plan_resultStandardSchemeFactory implements SchemeFactory { + public validate_resource_plan_resultStandardScheme getScheme() { + return new validate_resource_plan_resultStandardScheme(); } } - private static class create_resource_plan_resultStandardScheme extends StandardScheme { + private static class validate_resource_plan_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, create_resource_plan_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, validate_resource_plan_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -192069,7 +197533,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_resource_pla switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == 
org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new WMCreateResourcePlanResponse(); + struct.success = new WMValidateResourcePlanResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -192078,7 +197542,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_resource_pla break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new AlreadyExistsException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { @@ -192087,22 +197551,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_resource_pla break; case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new InvalidObjectException(); + struct.o2 = new MetaException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // O3 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o3 = new MetaException(); - struct.o3.read(iprot); - struct.setO3IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -192112,7 +197567,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_resource_pla struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, create_resource_plan_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, validate_resource_plan_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -192131,27 +197586,22 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_resource_pl struct.o2.write(oprot); oprot.writeFieldEnd(); } - if (struct.o3 != null) { - oprot.writeFieldBegin(O3_FIELD_DESC); - struct.o3.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class create_resource_plan_resultTupleSchemeFactory implements SchemeFactory { - public create_resource_plan_resultTupleScheme getScheme() { - return new create_resource_plan_resultTupleScheme(); + private static class validate_resource_plan_resultTupleSchemeFactory implements SchemeFactory { + public validate_resource_plan_resultTupleScheme getScheme() { + return new validate_resource_plan_resultTupleScheme(); } } - private static class create_resource_plan_resultTupleScheme extends TupleScheme { + private static class validate_resource_plan_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, create_resource_plan_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, validate_resource_plan_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -192163,10 +197613,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_resource_pla if (struct.isSetO2()) { optionals.set(2); } - if (struct.isSetO3()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); + oprot.writeBitSet(optionals, 3); if (struct.isSetSuccess()) { struct.success.write(oprot); } @@ -192176,52 +197623,44 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
create_resource_pla if (struct.isSetO2()) { struct.o2.write(oprot); } - if (struct.isSetO3()) { - struct.o3.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, create_resource_plan_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, validate_resource_plan_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.success = new WMCreateResourcePlanResponse(); + struct.success = new WMValidateResourcePlanResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o1 = new AlreadyExistsException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } if (incoming.get(2)) { - struct.o2 = new InvalidObjectException(); + struct.o2 = new MetaException(); struct.o2.read(iprot); struct.setO2IsSet(true); } - if (incoming.get(3)) { - struct.o3 = new MetaException(); - struct.o3.read(iprot); - struct.setO3IsSet(true); - } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_resource_plan_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_resource_plan_args"); private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_resource_plan_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_resource_plan_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new drop_resource_plan_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_resource_plan_argsTupleSchemeFactory()); } - private WMGetResourcePlanRequest request; // required + private WMDropResourcePlanRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
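
The validate_resource_plan structs above differ from the alter call only in their exception list: the result pairs WMValidateResourcePlanResponse (success) with just o1 = NoSuchObjectException and o2 = MetaException. A short sketch (not part of the patch), continuing with the client and imports from the previous example; the setter name is an assumption.

static void validateResourcePlan(ThriftHiveMetastore.Client client) throws Exception {
  WMValidateResourcePlanRequest request = new WMValidateResourcePlanRequest();
  request.setResourcePlanName("rp");  // assumed setter
  try {
    // validate_resource_plan_result: success = WMValidateResourcePlanResponse,
    // o1 = NoSuchObjectException, o2 = MetaException.
    WMValidateResourcePlanResponse response = client.validate_resource_plan(request);
    System.out.println("validation: " + response);
  } catch (NoSuchObjectException | MetaException e) {
    System.err.println("validate_resource_plan failed: " + e);
  }
}
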
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -192286,16 +197725,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetResourcePlanRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMDropResourcePlanRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_resource_plan_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_resource_plan_args.class, metaDataMap); } - public get_resource_plan_args() { + public drop_resource_plan_args() { } - public get_resource_plan_args( - WMGetResourcePlanRequest request) + public drop_resource_plan_args( + WMDropResourcePlanRequest request) { this(); this.request = request; @@ -192304,14 +197743,14 @@ public get_resource_plan_args( /** * Performs a deep copy on other. */ - public get_resource_plan_args(get_resource_plan_args other) { + public drop_resource_plan_args(drop_resource_plan_args other) { if (other.isSetRequest()) { - this.request = new WMGetResourcePlanRequest(other.request); + this.request = new WMDropResourcePlanRequest(other.request); } } - public get_resource_plan_args deepCopy() { - return new get_resource_plan_args(this); + public drop_resource_plan_args deepCopy() { + return new drop_resource_plan_args(this); } @Override @@ -192319,11 +197758,11 @@ public void clear() { this.request = null; } - public WMGetResourcePlanRequest getRequest() { + public WMDropResourcePlanRequest getRequest() { return this.request; } - public void setRequest(WMGetResourcePlanRequest request) { + public void setRequest(WMDropResourcePlanRequest request) { this.request = request; } @@ -192348,7 +197787,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRequest(); } else { - setRequest((WMGetResourcePlanRequest)value); + setRequest((WMDropResourcePlanRequest)value); } break; @@ -192381,12 +197820,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_resource_plan_args) - return this.equals((get_resource_plan_args)that); + if (that instanceof drop_resource_plan_args) + return this.equals((drop_resource_plan_args)that); return false; } - public boolean equals(get_resource_plan_args that) { + public boolean equals(drop_resource_plan_args that) { if (that == null) return false; @@ -192415,7 +197854,7 @@ public int hashCode() { } @Override - public int compareTo(get_resource_plan_args other) { + public int compareTo(drop_resource_plan_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -192449,7 +197888,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_resource_plan_args("); + StringBuilder sb = new StringBuilder("drop_resource_plan_args("); boolean first = true; sb.append("request:"); @@ -192487,15 +197926,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_resource_plan_argsStandardSchemeFactory implements SchemeFactory { - public get_resource_plan_argsStandardScheme getScheme() { - return new get_resource_plan_argsStandardScheme(); + private static class drop_resource_plan_argsStandardSchemeFactory implements SchemeFactory { + public drop_resource_plan_argsStandardScheme getScheme() { + return new drop_resource_plan_argsStandardScheme(); } } - private static class get_resource_plan_argsStandardScheme extends StandardScheme { + private static class drop_resource_plan_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_resource_plan_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_resource_plan_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -192507,7 +197946,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_resource_plan_a switch (schemeField.id) { case 1: // REQUEST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.request = new WMGetResourcePlanRequest(); + struct.request = new WMDropResourcePlanRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } else { @@ -192523,7 +197962,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_resource_plan_a struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_resource_plan_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_resource_plan_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -192538,16 +197977,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_resource_plan_ } - private static class get_resource_plan_argsTupleSchemeFactory implements SchemeFactory { - public get_resource_plan_argsTupleScheme getScheme() { - return new get_resource_plan_argsTupleScheme(); + private static class drop_resource_plan_argsTupleSchemeFactory implements SchemeFactory { + public drop_resource_plan_argsTupleScheme getScheme() { + return new drop_resource_plan_argsTupleScheme(); } } - private static class get_resource_plan_argsTupleScheme extends TupleScheme { + private static class drop_resource_plan_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRequest()) { @@ -192560,11 +197999,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_a } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_args struct) throws org.apache.thrift.TException { 
TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.request = new WMGetResourcePlanRequest(); + struct.request = new WMDropResourcePlanRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } @@ -192573,28 +198012,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_ar } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_resource_plan_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_resource_plan_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_resource_plan_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_resource_plan_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new drop_resource_plan_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_resource_plan_resultTupleSchemeFactory()); } - private WMGetResourcePlanResponse success; // required + private WMDropResourcePlanResponse success; // required private NoSuchObjectException o1; // required - private MetaException o2; // required + private InvalidOperationException o2; // required + private MetaException o3; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { SUCCESS((short)0, "success"), O1((short)1, "o1"), - O2((short)2, "o2"); + O2((short)2, "o2"), + O3((short)3, "o3"); private static final Map byName = new HashMap(); @@ -192615,6 +198057,8 @@ public static _Fields findByThriftId(int fieldId) { return O1; case 2: // O2 return O2; + case 3: // O3 + return O3; default: return null; } @@ -192659,46 +198103,53 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetResourcePlanResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMDropResourcePlanResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_resource_plan_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_resource_plan_result.class, metaDataMap); } - public get_resource_plan_result() { + public drop_resource_plan_result() { } - public get_resource_plan_result( - WMGetResourcePlanResponse success, + public drop_resource_plan_result( + WMDropResourcePlanResponse success, NoSuchObjectException o1, - MetaException o2) + InvalidOperationException o2, + MetaException o3) { this(); this.success = success; this.o1 = o1; this.o2 = o2; + this.o3 = o3; } /** * Performs a deep copy on other. 
*/ - public get_resource_plan_result(get_resource_plan_result other) { + public drop_resource_plan_result(drop_resource_plan_result other) { if (other.isSetSuccess()) { - this.success = new WMGetResourcePlanResponse(other.success); + this.success = new WMDropResourcePlanResponse(other.success); } if (other.isSetO1()) { this.o1 = new NoSuchObjectException(other.o1); } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new InvalidOperationException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); } } - public get_resource_plan_result deepCopy() { - return new get_resource_plan_result(this); + public drop_resource_plan_result deepCopy() { + return new drop_resource_plan_result(this); } @Override @@ -192706,13 +198157,14 @@ public void clear() { this.success = null; this.o1 = null; this.o2 = null; + this.o3 = null; } - public WMGetResourcePlanResponse getSuccess() { + public WMDropResourcePlanResponse getSuccess() { return this.success; } - public void setSuccess(WMGetResourcePlanResponse success) { + public void setSuccess(WMDropResourcePlanResponse success) { this.success = success; } @@ -192754,11 +198206,11 @@ public void setO1IsSet(boolean value) { } } - public MetaException getO2() { + public InvalidOperationException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(InvalidOperationException o2) { this.o2 = o2; } @@ -192777,13 +198229,36 @@ public void setO2IsSet(boolean value) { } } + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case SUCCESS: if (value == null) { unsetSuccess(); } else { - setSuccess((WMGetResourcePlanResponse)value); + setSuccess((WMDropResourcePlanResponse)value); } break; @@ -192799,7 +198274,15 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((InvalidOperationException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); } break; @@ -192817,6 +198300,9 @@ public Object getFieldValue(_Fields field) { case O2: return getO2(); + case O3: + return getO3(); + } throw new IllegalStateException(); } @@ -192834,6 +198320,8 @@ public boolean isSet(_Fields field) { return isSetO1(); case O2: return isSetO2(); + case O3: + return isSetO3(); } throw new IllegalStateException(); } @@ -192842,12 +198330,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_resource_plan_result) - return this.equals((get_resource_plan_result)that); + if (that instanceof drop_resource_plan_result) + return this.equals((drop_resource_plan_result)that); return false; } - public boolean equals(get_resource_plan_result that) { + public boolean equals(drop_resource_plan_result that) { if (that == null) return false; @@ -192878,6 +198366,15 @@ public boolean equals(get_resource_plan_result that) { return false; } + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) 
{ + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + return true; } @@ -192900,11 +198397,16 @@ public int hashCode() { if (present_o2) list.add(o2); + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + return list.hashCode(); } @Override - public int compareTo(get_resource_plan_result other) { + public int compareTo(drop_resource_plan_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -192941,6 +198443,16 @@ public int compareTo(get_resource_plan_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -192958,7 +198470,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_resource_plan_result("); + StringBuilder sb = new StringBuilder("drop_resource_plan_result("); boolean first = true; sb.append("success:"); @@ -192984,6 +198496,14 @@ public String toString() { sb.append(this.o2); } first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; sb.append(")"); return sb.toString(); } @@ -193012,15 +198532,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_resource_plan_resultStandardSchemeFactory implements SchemeFactory { - public get_resource_plan_resultStandardScheme getScheme() { - return new get_resource_plan_resultStandardScheme(); + private static class drop_resource_plan_resultStandardSchemeFactory implements SchemeFactory { + public drop_resource_plan_resultStandardScheme getScheme() { + return new drop_resource_plan_resultStandardScheme(); } } - private static class get_resource_plan_resultStandardScheme extends StandardScheme { + private static class drop_resource_plan_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_resource_plan_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_resource_plan_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -193032,7 +198552,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_resource_plan_r switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new WMGetResourcePlanResponse(); + struct.success = new WMDropResourcePlanResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -193050,13 +198570,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_resource_plan_r break; case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new MetaException(); + struct.o2 = new InvalidOperationException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // O3 + if (schemeField.type == 
org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -193066,7 +198595,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_resource_plan_r struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_resource_plan_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_resource_plan_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -193085,22 +198614,27 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_resource_plan_ struct.o2.write(oprot); oprot.writeFieldEnd(); } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_resource_plan_resultTupleSchemeFactory implements SchemeFactory { - public get_resource_plan_resultTupleScheme getScheme() { - return new get_resource_plan_resultTupleScheme(); + private static class drop_resource_plan_resultTupleSchemeFactory implements SchemeFactory { + public drop_resource_plan_resultTupleScheme getScheme() { + return new drop_resource_plan_resultTupleScheme(); } } - private static class get_resource_plan_resultTupleScheme extends TupleScheme { + private static class drop_resource_plan_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -193112,7 +198646,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_r if (struct.isSetO2()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetO3()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetSuccess()) { struct.success.write(oprot); } @@ -193122,14 +198659,17 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_r if (struct.isSetO2()) { struct.o2.write(oprot); } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { - struct.success = new WMGetResourcePlanResponse(); + struct.success = new WMDropResourcePlanResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -193139,27 +198679,32 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_re struct.setO1IsSet(true); } if (incoming.get(2)) { - struct.o2 = new MetaException(); + struct.o2 = new InvalidOperationException(); struct.o2.read(iprot); struct.setO2IsSet(true); } + if (incoming.get(3)) { + struct.o3 = new 
MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_active_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_active_resource_plan_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_wm_trigger_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_wm_trigger_args"); private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_active_resource_plan_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_active_resource_plan_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new create_wm_trigger_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new create_wm_trigger_argsTupleSchemeFactory()); } - private WMGetActiveResourcePlanRequest request; // required + private WMCreateTriggerRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -193224,16 +198769,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetActiveResourcePlanRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateTriggerRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_active_resource_plan_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_wm_trigger_args.class, metaDataMap); } - public get_active_resource_plan_args() { + public create_wm_trigger_args() { } - public get_active_resource_plan_args( - WMGetActiveResourcePlanRequest request) + public create_wm_trigger_args( + WMCreateTriggerRequest request) { this(); this.request = request; @@ -193242,14 +198787,14 @@ public get_active_resource_plan_args( /** * Performs a deep copy on other. 
*/ - public get_active_resource_plan_args(get_active_resource_plan_args other) { + public create_wm_trigger_args(create_wm_trigger_args other) { if (other.isSetRequest()) { - this.request = new WMGetActiveResourcePlanRequest(other.request); + this.request = new WMCreateTriggerRequest(other.request); } } - public get_active_resource_plan_args deepCopy() { - return new get_active_resource_plan_args(this); + public create_wm_trigger_args deepCopy() { + return new create_wm_trigger_args(this); } @Override @@ -193257,11 +198802,11 @@ public void clear() { this.request = null; } - public WMGetActiveResourcePlanRequest getRequest() { + public WMCreateTriggerRequest getRequest() { return this.request; } - public void setRequest(WMGetActiveResourcePlanRequest request) { + public void setRequest(WMCreateTriggerRequest request) { this.request = request; } @@ -193286,7 +198831,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRequest(); } else { - setRequest((WMGetActiveResourcePlanRequest)value); + setRequest((WMCreateTriggerRequest)value); } break; @@ -193319,12 +198864,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_active_resource_plan_args) - return this.equals((get_active_resource_plan_args)that); + if (that instanceof create_wm_trigger_args) + return this.equals((create_wm_trigger_args)that); return false; } - public boolean equals(get_active_resource_plan_args that) { + public boolean equals(create_wm_trigger_args that) { if (that == null) return false; @@ -193353,7 +198898,7 @@ public int hashCode() { } @Override - public int compareTo(get_active_resource_plan_args other) { + public int compareTo(create_wm_trigger_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -193387,7 +198932,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_active_resource_plan_args("); + StringBuilder sb = new StringBuilder("create_wm_trigger_args("); boolean first = true; sb.append("request:"); @@ -193425,15 +198970,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_active_resource_plan_argsStandardSchemeFactory implements SchemeFactory { - public get_active_resource_plan_argsStandardScheme getScheme() { - return new get_active_resource_plan_argsStandardScheme(); + private static class create_wm_trigger_argsStandardSchemeFactory implements SchemeFactory { + public create_wm_trigger_argsStandardScheme getScheme() { + return new create_wm_trigger_argsStandardScheme(); } } - private static class get_active_resource_plan_argsStandardScheme extends StandardScheme { + private static class create_wm_trigger_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_active_resource_plan_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, create_wm_trigger_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -193445,7 +198990,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_active_resource switch (schemeField.id) { case 1: // REQUEST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.request = new WMGetActiveResourcePlanRequest(); + struct.request = new WMCreateTriggerRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } else { @@ -193461,7 +199006,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_active_resource struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_active_resource_plan_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, create_wm_trigger_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -193476,16 +199021,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_active_resourc } - private static class get_active_resource_plan_argsTupleSchemeFactory implements SchemeFactory { - public get_active_resource_plan_argsTupleScheme getScheme() { - return new get_active_resource_plan_argsTupleScheme(); + private static class create_wm_trigger_argsTupleSchemeFactory implements SchemeFactory { + public create_wm_trigger_argsTupleScheme getScheme() { + return new create_wm_trigger_argsTupleScheme(); } } - private static class get_active_resource_plan_argsTupleScheme extends TupleScheme { + private static class create_wm_trigger_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_active_resource_plan_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, create_wm_trigger_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRequest()) { @@ -193498,11 +199043,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_active_resource } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_active_resource_plan_args struct) throws org.apache.thrift.TException { + public void 
read(org.apache.thrift.protocol.TProtocol prot, create_wm_trigger_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.request = new WMGetActiveResourcePlanRequest(); + struct.request = new WMCreateTriggerRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } @@ -193511,25 +199056,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_active_resource_ } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_active_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_active_resource_plan_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_wm_trigger_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_wm_trigger_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField O4_FIELD_DESC = new org.apache.thrift.protocol.TField("o4", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_active_resource_plan_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_active_resource_plan_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new create_wm_trigger_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new create_wm_trigger_resultTupleSchemeFactory()); } - private WMGetActiveResourcePlanResponse success; // required - private MetaException o2; // required + private WMCreateTriggerResponse success; // required + private AlreadyExistsException o1; // required + private NoSuchObjectException o2; // required + private InvalidObjectException o3; // required + private MetaException o4; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { SUCCESS((short)0, "success"), - O2((short)1, "o2"); + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"), + O4((short)4, "o4"); private static final Map byName = new HashMap(); @@ -193546,8 +199100,14 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 0: // SUCCESS return SUCCESS; - case 1: // O2 + case 1: // O1 + return O1; + case 2: // O2 return O2; + case 3: // O3 + return O3; + case 4: // O4 + return O4; default: return null; } @@ -193592,52 +199152,76 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetActiveResourcePlanResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateTriggerResponse.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O4, new org.apache.thrift.meta_data.FieldMetaData("o4", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_active_resource_plan_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_wm_trigger_result.class, metaDataMap); } - public get_active_resource_plan_result() { + public create_wm_trigger_result() { } - public get_active_resource_plan_result( - WMGetActiveResourcePlanResponse success, - MetaException o2) + public create_wm_trigger_result( + WMCreateTriggerResponse success, + AlreadyExistsException o1, + NoSuchObjectException o2, + InvalidObjectException o3, + MetaException o4) { this(); this.success = success; + this.o1 = o1; this.o2 = o2; + this.o3 = o3; + this.o4 = o4; } /** * Performs a deep copy on other. 
*/ - public get_active_resource_plan_result(get_active_resource_plan_result other) { + public create_wm_trigger_result(create_wm_trigger_result other) { if (other.isSetSuccess()) { - this.success = new WMGetActiveResourcePlanResponse(other.success); + this.success = new WMCreateTriggerResponse(other.success); + } + if (other.isSetO1()) { + this.o1 = new AlreadyExistsException(other.o1); } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new NoSuchObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new InvalidObjectException(other.o3); + } + if (other.isSetO4()) { + this.o4 = new MetaException(other.o4); } } - public get_active_resource_plan_result deepCopy() { - return new get_active_resource_plan_result(this); + public create_wm_trigger_result deepCopy() { + return new create_wm_trigger_result(this); } @Override public void clear() { this.success = null; + this.o1 = null; this.o2 = null; + this.o3 = null; + this.o4 = null; } - public WMGetActiveResourcePlanResponse getSuccess() { + public WMCreateTriggerResponse getSuccess() { return this.success; } - public void setSuccess(WMGetActiveResourcePlanResponse success) { + public void setSuccess(WMCreateTriggerResponse success) { this.success = success; } @@ -193656,11 +199240,34 @@ public void setSuccessIsSet(boolean value) { } } - public MetaException getO2() { + public AlreadyExistsException getO1() { + return this.o1; + } + + public void setO1(AlreadyExistsException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public NoSuchObjectException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(NoSuchObjectException o2) { this.o2 = o2; } @@ -193679,13 +199286,67 @@ public void setO2IsSet(boolean value) { } } + public InvalidObjectException getO3() { + return this.o3; + } + + public void setO3(InvalidObjectException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public MetaException getO4() { + return this.o4; + } + + public void setO4(MetaException o4) { + this.o4 = o4; + } + + public void unsetO4() { + this.o4 = null; + } + + /** Returns true if field o4 is set (has been assigned a value) and false otherwise */ + public boolean isSetO4() { + return this.o4 != null; + } + + public void setO4IsSet(boolean value) { + if (!value) { + this.o4 = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case SUCCESS: if (value == null) { unsetSuccess(); } else { - setSuccess((WMGetActiveResourcePlanResponse)value); + setSuccess((WMCreateTriggerResponse)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((AlreadyExistsException)value); } break; @@ -193693,7 +199354,23 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((NoSuchObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((InvalidObjectException)value); + } + break; + + case O4: + if 
(value == null) { + unsetO4(); + } else { + setO4((MetaException)value); } break; @@ -193705,9 +199382,18 @@ public Object getFieldValue(_Fields field) { case SUCCESS: return getSuccess(); + case O1: + return getO1(); + case O2: return getO2(); + case O3: + return getO3(); + + case O4: + return getO4(); + } throw new IllegalStateException(); } @@ -193721,8 +199407,14 @@ public boolean isSet(_Fields field) { switch (field) { case SUCCESS: return isSetSuccess(); + case O1: + return isSetO1(); case O2: return isSetO2(); + case O3: + return isSetO3(); + case O4: + return isSetO4(); } throw new IllegalStateException(); } @@ -193731,12 +199423,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_active_resource_plan_result) - return this.equals((get_active_resource_plan_result)that); + if (that instanceof create_wm_trigger_result) + return this.equals((create_wm_trigger_result)that); return false; } - public boolean equals(get_active_resource_plan_result that) { + public boolean equals(create_wm_trigger_result that) { if (that == null) return false; @@ -193749,6 +199441,15 @@ public boolean equals(get_active_resource_plan_result that) { return false; } + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + boolean this_present_o2 = true && this.isSetO2(); boolean that_present_o2 = true && that.isSetO2(); if (this_present_o2 || that_present_o2) { @@ -193758,6 +199459,24 @@ public boolean equals(get_active_resource_plan_result that) { return false; } + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + boolean this_present_o4 = true && this.isSetO4(); + boolean that_present_o4 = true && that.isSetO4(); + if (this_present_o4 || that_present_o4) { + if (!(this_present_o4 && that_present_o4)) + return false; + if (!this.o4.equals(that.o4)) + return false; + } + return true; } @@ -193770,16 +199489,31 @@ public int hashCode() { if (present_success) list.add(success); + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + boolean present_o2 = true && (isSetO2()); list.add(present_o2); if (present_o2) list.add(o2); + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + + boolean present_o4 = true && (isSetO4()); + list.add(present_o4); + if (present_o4) + list.add(o4); + return list.hashCode(); } @Override - public int compareTo(get_active_resource_plan_result other) { + public int compareTo(create_wm_trigger_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -193796,6 +199530,16 @@ public int compareTo(get_active_resource_plan_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); if 
(lastComparison != 0) { return lastComparison; @@ -193806,6 +199550,26 @@ public int compareTo(get_active_resource_plan_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO4()).compareTo(other.isSetO4()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO4()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o4, other.o4); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -193823,7 +199587,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_active_resource_plan_result("); + StringBuilder sb = new StringBuilder("create_wm_trigger_result("); boolean first = true; sb.append("success:"); @@ -193834,6 +199598,14 @@ public String toString() { } first = false; if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); sb.append("o2:"); if (this.o2 == null) { sb.append("null"); @@ -193841,6 +199613,22 @@ public String toString() { sb.append(this.o2); } first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + if (!first) sb.append(", "); + sb.append("o4:"); + if (this.o4 == null) { + sb.append("null"); + } else { + sb.append(this.o4); + } + first = false; sb.append(")"); return sb.toString(); } @@ -193869,15 +199657,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_active_resource_plan_resultStandardSchemeFactory implements SchemeFactory { - public get_active_resource_plan_resultStandardScheme getScheme() { - return new get_active_resource_plan_resultStandardScheme(); + private static class create_wm_trigger_resultStandardSchemeFactory implements SchemeFactory { + public create_wm_trigger_resultStandardScheme getScheme() { + return new create_wm_trigger_resultStandardScheme(); } } - private static class get_active_resource_plan_resultStandardScheme extends StandardScheme { + private static class create_wm_trigger_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_active_resource_plan_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, create_wm_trigger_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -193889,22 +199677,49 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_active_resource switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new WMGetActiveResourcePlanResponse(); + struct.success = new WMCreateTriggerResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 1: // O2 + case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new MetaException(); + 
struct.o1 = new AlreadyExistsException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new InvalidObjectException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // O4 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o4 = new MetaException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -193914,7 +199729,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_active_resource struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_active_resource_plan_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, create_wm_trigger_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -193923,75 +199738,123 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_active_resourc struct.success.write(oprot); oprot.writeFieldEnd(); } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } if (struct.o2 != null) { oprot.writeFieldBegin(O2_FIELD_DESC); struct.o2.write(oprot); oprot.writeFieldEnd(); } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o4 != null) { + oprot.writeFieldBegin(O4_FIELD_DESC); + struct.o4.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_active_resource_plan_resultTupleSchemeFactory implements SchemeFactory { - public get_active_resource_plan_resultTupleScheme getScheme() { - return new get_active_resource_plan_resultTupleScheme(); + private static class create_wm_trigger_resultTupleSchemeFactory implements SchemeFactory { + public create_wm_trigger_resultTupleScheme getScheme() { + return new create_wm_trigger_resultTupleScheme(); } } - private static class get_active_resource_plan_resultTupleScheme extends TupleScheme { + private static class create_wm_trigger_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_active_resource_plan_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, create_wm_trigger_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { optionals.set(0); } - if (struct.isSetO2()) { + if (struct.isSetO1()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetO2()) { + optionals.set(2); + } + if (struct.isSetO3()) { + optionals.set(3); + } + if (struct.isSetO4()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if 
(struct.isSetSuccess()) { struct.success.write(oprot); } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } if (struct.isSetO2()) { struct.o2.write(oprot); } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } + if (struct.isSetO4()) { + struct.o4.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_active_resource_plan_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, create_wm_trigger_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { - struct.success = new WMGetActiveResourcePlanResponse(); + struct.success = new WMCreateTriggerResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o2 = new MetaException(); + struct.o1 = new AlreadyExistsException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } + if (incoming.get(3)) { + struct.o3 = new InvalidObjectException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } + if (incoming.get(4)) { + struct.o4 = new MetaException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_all_resource_plans_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_all_resource_plans_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_wm_trigger_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_wm_trigger_args"); private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_all_resource_plans_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_all_resource_plans_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new alter_wm_trigger_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_wm_trigger_argsTupleSchemeFactory()); } - private WMGetAllResourcePlanRequest request; // required + private WMAlterTriggerRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -194056,16 +199919,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetAllResourcePlanRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMAlterTriggerRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_all_resource_plans_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_wm_trigger_args.class, metaDataMap); } - public get_all_resource_plans_args() { + public alter_wm_trigger_args() { } - public get_all_resource_plans_args( - WMGetAllResourcePlanRequest request) + public alter_wm_trigger_args( + WMAlterTriggerRequest request) { this(); this.request = request; @@ -194074,14 +199937,14 @@ public get_all_resource_plans_args( /** * Performs a deep copy on other. */ - public get_all_resource_plans_args(get_all_resource_plans_args other) { + public alter_wm_trigger_args(alter_wm_trigger_args other) { if (other.isSetRequest()) { - this.request = new WMGetAllResourcePlanRequest(other.request); + this.request = new WMAlterTriggerRequest(other.request); } } - public get_all_resource_plans_args deepCopy() { - return new get_all_resource_plans_args(this); + public alter_wm_trigger_args deepCopy() { + return new alter_wm_trigger_args(this); } @Override @@ -194089,11 +199952,11 @@ public void clear() { this.request = null; } - public WMGetAllResourcePlanRequest getRequest() { + public WMAlterTriggerRequest getRequest() { return this.request; } - public void setRequest(WMGetAllResourcePlanRequest request) { + public void setRequest(WMAlterTriggerRequest request) { this.request = request; } @@ -194118,7 +199981,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRequest(); } else { - setRequest((WMGetAllResourcePlanRequest)value); + setRequest((WMAlterTriggerRequest)value); } break; @@ -194151,12 +200014,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_all_resource_plans_args) - return this.equals((get_all_resource_plans_args)that); + if (that instanceof alter_wm_trigger_args) + return this.equals((alter_wm_trigger_args)that); return false; } - public boolean equals(get_all_resource_plans_args that) { + public boolean equals(alter_wm_trigger_args that) { if (that == null) return false; @@ -194185,7 +200048,7 @@ public int hashCode() { } @Override - public int compareTo(get_all_resource_plans_args other) { + public int compareTo(alter_wm_trigger_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -194219,7 +200082,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_all_resource_plans_args("); + StringBuilder sb = new StringBuilder("alter_wm_trigger_args("); boolean first = true; sb.append("request:"); @@ -194257,15 +200120,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_all_resource_plans_argsStandardSchemeFactory implements SchemeFactory { - public get_all_resource_plans_argsStandardScheme getScheme() { - return new get_all_resource_plans_argsStandardScheme(); + private static class alter_wm_trigger_argsStandardSchemeFactory implements SchemeFactory { + public alter_wm_trigger_argsStandardScheme getScheme() { + return new alter_wm_trigger_argsStandardScheme(); } } - private static class get_all_resource_plans_argsStandardScheme extends StandardScheme { + private static class alter_wm_trigger_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_resource_plans_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_wm_trigger_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -194277,7 +200140,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_resource_pl switch (schemeField.id) { case 1: // REQUEST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.request = new WMGetAllResourcePlanRequest(); + struct.request = new WMAlterTriggerRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } else { @@ -194293,7 +200156,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_resource_pl struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_resource_plans_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_wm_trigger_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -194308,16 +200171,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_resource_p } - private static class get_all_resource_plans_argsTupleSchemeFactory implements SchemeFactory { - public get_all_resource_plans_argsTupleScheme getScheme() { - return new get_all_resource_plans_argsTupleScheme(); + private static class alter_wm_trigger_argsTupleSchemeFactory implements SchemeFactory { + public alter_wm_trigger_argsTupleScheme getScheme() { + return new alter_wm_trigger_argsTupleScheme(); } } - private static class get_all_resource_plans_argsTupleScheme extends TupleScheme { + private static class alter_wm_trigger_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_all_resource_plans_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, alter_wm_trigger_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRequest()) { @@ -194330,11 +200193,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_resource_pl } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_all_resource_plans_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, alter_wm_trigger_args struct) 
throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.request = new WMGetAllResourcePlanRequest(); + struct.request = new WMAlterTriggerRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } @@ -194343,25 +200206,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_resource_pla } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_all_resource_plans_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_all_resource_plans_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_wm_trigger_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_wm_trigger_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_all_resource_plans_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_all_resource_plans_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new alter_wm_trigger_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_wm_trigger_resultTupleSchemeFactory()); } - private WMGetAllResourcePlanResponse success; // required - private MetaException o1; // required + private WMAlterTriggerResponse success; // required + private NoSuchObjectException o1; // required + private InvalidObjectException o2; // required + private MetaException o3; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { SUCCESS((short)0, "success"), - O1((short)1, "o1"); + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"); private static final Map byName = new HashMap(); @@ -194380,6 +200249,10 @@ public static _Fields findByThriftId(int fieldId) { return SUCCESS; case 1: // O1 return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; default: return null; } @@ -194424,52 +200297,68 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetAllResourcePlanResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMAlterTriggerResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_all_resource_plans_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_wm_trigger_result.class, metaDataMap); } - public get_all_resource_plans_result() { + public alter_wm_trigger_result() { } - public get_all_resource_plans_result( - WMGetAllResourcePlanResponse success, - MetaException o1) + public alter_wm_trigger_result( + WMAlterTriggerResponse success, + NoSuchObjectException o1, + InvalidObjectException o2, + MetaException o3) { this(); this.success = success; this.o1 = o1; + this.o2 = o2; + this.o3 = o3; } /** * Performs a deep copy on other. 
*/ - public get_all_resource_plans_result(get_all_resource_plans_result other) { + public alter_wm_trigger_result(alter_wm_trigger_result other) { if (other.isSetSuccess()) { - this.success = new WMGetAllResourcePlanResponse(other.success); + this.success = new WMAlterTriggerResponse(other.success); } if (other.isSetO1()) { - this.o1 = new MetaException(other.o1); + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); } } - public get_all_resource_plans_result deepCopy() { - return new get_all_resource_plans_result(this); + public alter_wm_trigger_result deepCopy() { + return new alter_wm_trigger_result(this); } @Override public void clear() { this.success = null; this.o1 = null; + this.o2 = null; + this.o3 = null; } - public WMGetAllResourcePlanResponse getSuccess() { + public WMAlterTriggerResponse getSuccess() { return this.success; } - public void setSuccess(WMGetAllResourcePlanResponse success) { + public void setSuccess(WMAlterTriggerResponse success) { this.success = success; } @@ -194488,11 +200377,11 @@ public void setSuccessIsSet(boolean value) { } } - public MetaException getO1() { + public NoSuchObjectException getO1() { return this.o1; } - public void setO1(MetaException o1) { + public void setO1(NoSuchObjectException o1) { this.o1 = o1; } @@ -194511,13 +200400,59 @@ public void setO1IsSet(boolean value) { } } + public InvalidObjectException getO2() { + return this.o2; + } + + public void setO2(InvalidObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case SUCCESS: if (value == null) { unsetSuccess(); } else { - setSuccess((WMGetAllResourcePlanResponse)value); + setSuccess((WMAlterTriggerResponse)value); } break; @@ -194525,7 +200460,23 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO1(); } else { - setO1((MetaException)value); + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((InvalidObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); } break; @@ -194540,6 +200491,12 @@ public Object getFieldValue(_Fields field) { case O1: return getO1(); + case O2: + return getO2(); + + case O3: + return getO3(); + } throw new IllegalStateException(); } @@ -194555,6 +200512,10 @@ public boolean isSet(_Fields field) { return isSetSuccess(); case O1: return isSetO1(); + case O2: + return isSetO2(); + case O3: + return isSetO3(); } throw new IllegalStateException(); } @@ -194563,12 +200524,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof 
get_all_resource_plans_result) - return this.equals((get_all_resource_plans_result)that); + if (that instanceof alter_wm_trigger_result) + return this.equals((alter_wm_trigger_result)that); return false; } - public boolean equals(get_all_resource_plans_result that) { + public boolean equals(alter_wm_trigger_result that) { if (that == null) return false; @@ -194590,6 +200551,24 @@ public boolean equals(get_all_resource_plans_result that) { return false; } + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + return true; } @@ -194607,11 +200586,21 @@ public int hashCode() { if (present_o1) list.add(o1); + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + return list.hashCode(); } @Override - public int compareTo(get_all_resource_plans_result other) { + public int compareTo(alter_wm_trigger_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -194638,6 +200627,26 @@ public int compareTo(get_all_resource_plans_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -194655,7 +200664,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_all_resource_plans_result("); + StringBuilder sb = new StringBuilder("alter_wm_trigger_result("); boolean first = true; sb.append("success:"); @@ -194673,6 +200682,22 @@ public String toString() { sb.append(this.o1); } first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; sb.append(")"); return sb.toString(); } @@ -194701,15 +200726,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_all_resource_plans_resultStandardSchemeFactory implements SchemeFactory { - public get_all_resource_plans_resultStandardScheme getScheme() { - return new get_all_resource_plans_resultStandardScheme(); + private static class alter_wm_trigger_resultStandardSchemeFactory implements SchemeFactory { + public alter_wm_trigger_resultStandardScheme getScheme() { + return new alter_wm_trigger_resultStandardScheme(); } } - private static class get_all_resource_plans_resultStandardScheme extends StandardScheme { + private static class alter_wm_trigger_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_resource_plans_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_wm_trigger_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -194721,7 +200746,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_resource_pl switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new WMGetAllResourcePlanResponse(); + struct.success = new WMAlterTriggerResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -194730,13 +200755,31 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_resource_pl break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new MetaException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new InvalidObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -194746,7 +200789,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_resource_pl struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_resource_plans_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_wm_trigger_result struct) throws 
org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -194760,22 +200803,32 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_resource_p struct.o1.write(oprot); oprot.writeFieldEnd(); } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_all_resource_plans_resultTupleSchemeFactory implements SchemeFactory { - public get_all_resource_plans_resultTupleScheme getScheme() { - return new get_all_resource_plans_resultTupleScheme(); + private static class alter_wm_trigger_resultTupleSchemeFactory implements SchemeFactory { + public alter_wm_trigger_resultTupleScheme getScheme() { + return new alter_wm_trigger_resultTupleScheme(); } } - private static class get_all_resource_plans_resultTupleScheme extends TupleScheme { + private static class alter_wm_trigger_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_all_resource_plans_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, alter_wm_trigger_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -194784,46 +200837,68 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_resource_pl if (struct.isSetO1()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetO2()) { + optionals.set(2); + } + if (struct.isSetO3()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetSuccess()) { struct.success.write(oprot); } if (struct.isSetO1()) { struct.o1.write(oprot); } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_all_resource_plans_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, alter_wm_trigger_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { - struct.success = new WMGetAllResourcePlanResponse(); + struct.success = new WMAlterTriggerResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o1 = new MetaException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } + if (incoming.get(2)) { + struct.o2 = new InvalidObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + if (incoming.get(3)) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_resource_plan_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public 
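// --- Editorial sketch, not part of the generated patch ----------------------------
// The alter_wm_trigger_result struct above widens the error surface from a single
// MetaException to NoSuchObjectException (o1), InvalidObjectException (o2) and
// MetaException (o3). A hypothetical caller of the corresponding service method could
// look like the following; the request type WMAlterTriggerRequest is assumed (only the
// response and exception types appear in this hunk), and all of these Thrift exceptions
// extend TException, so a single throws clause suffices.
import org.apache.hadoop.hive.metastore.api.*;
import org.apache.thrift.TException;

class AlterTriggerSketch {
  // Returns the response, or null when the plan/trigger does not exist (o1).
  static WMAlterTriggerResponse alterTrigger(ThriftHiveMetastore.Iface client,
      WMAlterTriggerRequest request /* assumed request type */) throws TException {
    try {
      return client.alter_wm_trigger(request);
    } catch (NoSuchObjectException e) {   // o1: resource plan or trigger missing
      return null;
    }
    // o2 InvalidObjectException and o3 MetaException propagate to the caller.
  }
}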
@org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_trigger_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_wm_trigger_args"); private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new alter_resource_plan_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new alter_resource_plan_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new drop_wm_trigger_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_wm_trigger_argsTupleSchemeFactory()); } - private WMAlterResourcePlanRequest request; // required + private WMDropTriggerRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -194888,16 +200963,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMAlterResourcePlanRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMDropTriggerRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_resource_plan_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_wm_trigger_args.class, metaDataMap); } - public alter_resource_plan_args() { + public drop_wm_trigger_args() { } - public alter_resource_plan_args( - WMAlterResourcePlanRequest request) + public drop_wm_trigger_args( + WMDropTriggerRequest request) { this(); this.request = request; @@ -194906,14 +200981,14 @@ public alter_resource_plan_args( /** * Performs a deep copy on other. 
*/ - public alter_resource_plan_args(alter_resource_plan_args other) { + public drop_wm_trigger_args(drop_wm_trigger_args other) { if (other.isSetRequest()) { - this.request = new WMAlterResourcePlanRequest(other.request); + this.request = new WMDropTriggerRequest(other.request); } } - public alter_resource_plan_args deepCopy() { - return new alter_resource_plan_args(this); + public drop_wm_trigger_args deepCopy() { + return new drop_wm_trigger_args(this); } @Override @@ -194921,11 +200996,11 @@ public void clear() { this.request = null; } - public WMAlterResourcePlanRequest getRequest() { + public WMDropTriggerRequest getRequest() { return this.request; } - public void setRequest(WMAlterResourcePlanRequest request) { + public void setRequest(WMDropTriggerRequest request) { this.request = request; } @@ -194950,7 +201025,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRequest(); } else { - setRequest((WMAlterResourcePlanRequest)value); + setRequest((WMDropTriggerRequest)value); } break; @@ -194983,12 +201058,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof alter_resource_plan_args) - return this.equals((alter_resource_plan_args)that); + if (that instanceof drop_wm_trigger_args) + return this.equals((drop_wm_trigger_args)that); return false; } - public boolean equals(alter_resource_plan_args that) { + public boolean equals(drop_wm_trigger_args that) { if (that == null) return false; @@ -195017,7 +201092,7 @@ public int hashCode() { } @Override - public int compareTo(alter_resource_plan_args other) { + public int compareTo(drop_wm_trigger_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -195051,7 +201126,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("alter_resource_plan_args("); + StringBuilder sb = new StringBuilder("drop_wm_trigger_args("); boolean first = true; sb.append("request:"); @@ -195089,15 +201164,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class alter_resource_plan_argsStandardSchemeFactory implements SchemeFactory { - public alter_resource_plan_argsStandardScheme getScheme() { - return new alter_resource_plan_argsStandardScheme(); + private static class drop_wm_trigger_argsStandardSchemeFactory implements SchemeFactory { + public drop_wm_trigger_argsStandardScheme getScheme() { + return new drop_wm_trigger_argsStandardScheme(); } } - private static class alter_resource_plan_argsStandardScheme extends StandardScheme { + private static class drop_wm_trigger_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, alter_resource_plan_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_wm_trigger_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -195109,7 +201184,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_resource_plan switch (schemeField.id) { case 1: // REQUEST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.request = new WMAlterResourcePlanRequest(); + struct.request = new WMDropTriggerRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } else { @@ -195125,7 +201200,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_resource_plan struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, alter_resource_plan_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_wm_trigger_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -195140,16 +201215,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_resource_pla } - private static class alter_resource_plan_argsTupleSchemeFactory implements SchemeFactory { - public alter_resource_plan_argsTupleScheme getScheme() { - return new alter_resource_plan_argsTupleScheme(); + private static class drop_wm_trigger_argsTupleSchemeFactory implements SchemeFactory { + public drop_wm_trigger_argsTupleScheme getScheme() { + return new drop_wm_trigger_argsTupleScheme(); } } - private static class alter_resource_plan_argsTupleScheme extends TupleScheme { + private static class drop_wm_trigger_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_wm_trigger_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRequest()) { @@ -195162,11 +201237,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_wm_trigger_args struct) throws org.apache.thrift.TException { TTupleProtocol 
iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.request = new WMAlterResourcePlanRequest(); + struct.request = new WMDropTriggerRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } @@ -195175,8 +201250,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan_ } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_resource_plan_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_trigger_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_wm_trigger_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); @@ -195185,11 +201260,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan_ private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new alter_resource_plan_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new alter_resource_plan_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new drop_wm_trigger_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_wm_trigger_resultTupleSchemeFactory()); } - private WMAlterResourcePlanResponse success; // required + private WMDropTriggerResponse success; // required private NoSuchObjectException o1; // required private InvalidOperationException o2; // required private MetaException o3; // required @@ -195266,7 +201341,7 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMAlterResourcePlanResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMDropTriggerResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -195274,14 +201349,14 @@ public String getFieldName() { tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - 
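// --- Editorial sketch, not part of the generated patch ----------------------------
// drop_wm_trigger_args simply wraps a WMDropTriggerRequest. A minimal client-side call
// might look like this; setResourcePlanName/setTriggerName are assumed from the
// (resourcePlanName, triggerName) pair used by the RawStore changes in this patch and
// are not shown in this hunk.
import org.apache.hadoop.hive.metastore.api.*;
import org.apache.thrift.TException;

class DropTriggerSketch {
  static void dropTrigger(ThriftHiveMetastore.Iface client,
      String resourcePlanName, String triggerName) throws TException {
    WMDropTriggerRequest request = new WMDropTriggerRequest();
    request.setResourcePlanName(resourcePlanName);   // assumed field name
    request.setTriggerName(triggerName);             // assumed field name
    // Declared errors per drop_wm_trigger_result:
    // o1 NoSuchObjectException, o2 InvalidOperationException, o3 MetaException.
    client.drop_wm_trigger(request);
  }
}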
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_resource_plan_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_wm_trigger_result.class, metaDataMap); } - public alter_resource_plan_result() { + public drop_wm_trigger_result() { } - public alter_resource_plan_result( - WMAlterResourcePlanResponse success, + public drop_wm_trigger_result( + WMDropTriggerResponse success, NoSuchObjectException o1, InvalidOperationException o2, MetaException o3) @@ -195296,9 +201371,9 @@ public alter_resource_plan_result( /** * Performs a deep copy on other. */ - public alter_resource_plan_result(alter_resource_plan_result other) { + public drop_wm_trigger_result(drop_wm_trigger_result other) { if (other.isSetSuccess()) { - this.success = new WMAlterResourcePlanResponse(other.success); + this.success = new WMDropTriggerResponse(other.success); } if (other.isSetO1()) { this.o1 = new NoSuchObjectException(other.o1); @@ -195311,8 +201386,8 @@ public alter_resource_plan_result(alter_resource_plan_result other) { } } - public alter_resource_plan_result deepCopy() { - return new alter_resource_plan_result(this); + public drop_wm_trigger_result deepCopy() { + return new drop_wm_trigger_result(this); } @Override @@ -195323,11 +201398,11 @@ public void clear() { this.o3 = null; } - public WMAlterResourcePlanResponse getSuccess() { + public WMDropTriggerResponse getSuccess() { return this.success; } - public void setSuccess(WMAlterResourcePlanResponse success) { + public void setSuccess(WMDropTriggerResponse success) { this.success = success; } @@ -195421,7 +201496,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((WMAlterResourcePlanResponse)value); + setSuccess((WMDropTriggerResponse)value); } break; @@ -195493,12 +201568,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof alter_resource_plan_result) - return this.equals((alter_resource_plan_result)that); + if (that instanceof drop_wm_trigger_result) + return this.equals((drop_wm_trigger_result)that); return false; } - public boolean equals(alter_resource_plan_result that) { + public boolean equals(drop_wm_trigger_result that) { if (that == null) return false; @@ -195569,7 +201644,7 @@ public int hashCode() { } @Override - public int compareTo(alter_resource_plan_result other) { + public int compareTo(drop_wm_trigger_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -195633,7 +201708,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("alter_resource_plan_result("); + StringBuilder sb = new StringBuilder("drop_wm_trigger_result("); boolean first = true; sb.append("success:"); @@ -195695,15 +201770,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class alter_resource_plan_resultStandardSchemeFactory implements SchemeFactory { - public alter_resource_plan_resultStandardScheme getScheme() { - return new alter_resource_plan_resultStandardScheme(); + private static class drop_wm_trigger_resultStandardSchemeFactory implements SchemeFactory { + public drop_wm_trigger_resultStandardScheme getScheme() { + return new drop_wm_trigger_resultStandardScheme(); } } - private static class alter_resource_plan_resultStandardScheme extends StandardScheme { + private static class drop_wm_trigger_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, alter_resource_plan_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_wm_trigger_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -195715,7 +201790,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_resource_plan switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new WMAlterResourcePlanResponse(); + struct.success = new WMDropTriggerResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -195758,7 +201833,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_resource_plan struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, alter_resource_plan_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_wm_trigger_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -195788,16 +201863,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_resource_pla } - private static class alter_resource_plan_resultTupleSchemeFactory implements SchemeFactory { - public alter_resource_plan_resultTupleScheme getScheme() { - return new alter_resource_plan_resultTupleScheme(); + private static class drop_wm_trigger_resultTupleSchemeFactory implements SchemeFactory { + public drop_wm_trigger_resultTupleScheme getScheme() { + return new drop_wm_trigger_resultTupleScheme(); } } - private static class alter_resource_plan_resultTupleScheme extends TupleScheme { + private static class drop_wm_trigger_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_wm_trigger_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -195828,11 +201903,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_wm_trigger_result struct) 
throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { - struct.success = new WMAlterResourcePlanResponse(); + struct.success = new WMDropTriggerResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -195856,18 +201931,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan_ } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class validate_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("validate_resource_plan_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_triggers_for_resourceplan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_triggers_for_resourceplan_args"); private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new validate_resource_plan_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new validate_resource_plan_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_triggers_for_resourceplan_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_triggers_for_resourceplan_argsTupleSchemeFactory()); } - private WMValidateResourcePlanRequest request; // required + private WMGetTriggersForResourePlanRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -195932,16 +202007,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMValidateResourcePlanRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetTriggersForResourePlanRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(validate_resource_plan_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_triggers_for_resourceplan_args.class, metaDataMap); } - public validate_resource_plan_args() { + public get_triggers_for_resourceplan_args() { } - public validate_resource_plan_args( - WMValidateResourcePlanRequest request) + public get_triggers_for_resourceplan_args( + WMGetTriggersForResourePlanRequest request) { this(); this.request = request; @@ -195950,14 +202025,14 @@ public validate_resource_plan_args( /** * Performs a deep copy on other. 
*/ - public validate_resource_plan_args(validate_resource_plan_args other) { + public get_triggers_for_resourceplan_args(get_triggers_for_resourceplan_args other) { if (other.isSetRequest()) { - this.request = new WMValidateResourcePlanRequest(other.request); + this.request = new WMGetTriggersForResourePlanRequest(other.request); } } - public validate_resource_plan_args deepCopy() { - return new validate_resource_plan_args(this); + public get_triggers_for_resourceplan_args deepCopy() { + return new get_triggers_for_resourceplan_args(this); } @Override @@ -195965,11 +202040,11 @@ public void clear() { this.request = null; } - public WMValidateResourcePlanRequest getRequest() { + public WMGetTriggersForResourePlanRequest getRequest() { return this.request; } - public void setRequest(WMValidateResourcePlanRequest request) { + public void setRequest(WMGetTriggersForResourePlanRequest request) { this.request = request; } @@ -195994,7 +202069,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRequest(); } else { - setRequest((WMValidateResourcePlanRequest)value); + setRequest((WMGetTriggersForResourePlanRequest)value); } break; @@ -196027,12 +202102,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof validate_resource_plan_args) - return this.equals((validate_resource_plan_args)that); + if (that instanceof get_triggers_for_resourceplan_args) + return this.equals((get_triggers_for_resourceplan_args)that); return false; } - public boolean equals(validate_resource_plan_args that) { + public boolean equals(get_triggers_for_resourceplan_args that) { if (that == null) return false; @@ -196061,7 +202136,7 @@ public int hashCode() { } @Override - public int compareTo(validate_resource_plan_args other) { + public int compareTo(get_triggers_for_resourceplan_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -196095,7 +202170,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("validate_resource_plan_args("); + StringBuilder sb = new StringBuilder("get_triggers_for_resourceplan_args("); boolean first = true; sb.append("request:"); @@ -196133,15 +202208,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class validate_resource_plan_argsStandardSchemeFactory implements SchemeFactory { - public validate_resource_plan_argsStandardScheme getScheme() { - return new validate_resource_plan_argsStandardScheme(); + private static class get_triggers_for_resourceplan_argsStandardSchemeFactory implements SchemeFactory { + public get_triggers_for_resourceplan_argsStandardScheme getScheme() { + return new get_triggers_for_resourceplan_argsStandardScheme(); } } - private static class validate_resource_plan_argsStandardScheme extends StandardScheme { + private static class get_triggers_for_resourceplan_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, validate_resource_plan_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_triggers_for_resourceplan_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -196153,7 +202228,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, validate_resource_p switch (schemeField.id) { case 1: // REQUEST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.request = new WMValidateResourcePlanRequest(); + struct.request = new WMGetTriggersForResourePlanRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } else { @@ -196169,7 +202244,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, validate_resource_p struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, validate_resource_plan_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_triggers_for_resourceplan_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -196184,16 +202259,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, validate_resource_ } - private static class validate_resource_plan_argsTupleSchemeFactory implements SchemeFactory { - public validate_resource_plan_argsTupleScheme getScheme() { - return new validate_resource_plan_argsTupleScheme(); + private static class get_triggers_for_resourceplan_argsTupleSchemeFactory implements SchemeFactory { + public get_triggers_for_resourceplan_argsTupleScheme getScheme() { + return new get_triggers_for_resourceplan_argsTupleScheme(); } } - private static class validate_resource_plan_argsTupleScheme extends TupleScheme { + private static class get_triggers_for_resourceplan_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, validate_resource_plan_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_triggers_for_resourceplan_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRequest()) { @@ -196206,11 +202281,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, validate_resource_p } @Override - public void read(org.apache.thrift.protocol.TProtocol 
prot, validate_resource_plan_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_triggers_for_resourceplan_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.request = new WMValidateResourcePlanRequest(); + struct.request = new WMGetTriggersForResourePlanRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } @@ -196219,8 +202294,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, validate_resource_pl } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class validate_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("validate_resource_plan_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_triggers_for_resourceplan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_triggers_for_resourceplan_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); @@ -196228,11 +202303,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, validate_resource_pl private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new validate_resource_plan_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new validate_resource_plan_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_triggers_for_resourceplan_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_triggers_for_resourceplan_resultTupleSchemeFactory()); } - private WMValidateResourcePlanResponse success; // required + private WMGetTriggersForResourePlanResponse success; // required private NoSuchObjectException o1; // required private MetaException o2; // required @@ -196305,20 +202380,20 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMValidateResourcePlanResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetTriggersForResourePlanResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new 
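// --- Editorial sketch, not part of the generated patch ----------------------------
// get_triggers_for_resourceplan_args wraps a WMGetTriggersForResourePlanRequest (the
// "Resoure" spelling is the pre-existing generated type name and is kept as-is). A
// hypothetical lookup of the triggers attached to a plan; setResourcePlanName,
// getTriggers and the WMTrigger element type are assumed, not shown in this hunk.
import java.util.List;
import org.apache.hadoop.hive.metastore.api.*;
import org.apache.thrift.TException;

class GetTriggersSketch {
  static List<WMTrigger> triggersFor(ThriftHiveMetastore.Iface client, String planName)
      throws TException {
    WMGetTriggersForResourePlanRequest request = new WMGetTriggersForResourePlanRequest();
    request.setResourcePlanName(planName);            // assumed field name
    // Declared errors per get_triggers_for_resourceplan_result:
    // o1 NoSuchObjectException, o2 MetaException.
    WMGetTriggersForResourePlanResponse response =
        client.get_triggers_for_resourceplan(request);
    return response.getTriggers();                    // assumed accessor
  }
}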
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(validate_resource_plan_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_triggers_for_resourceplan_result.class, metaDataMap); } - public validate_resource_plan_result() { + public get_triggers_for_resourceplan_result() { } - public validate_resource_plan_result( - WMValidateResourcePlanResponse success, + public get_triggers_for_resourceplan_result( + WMGetTriggersForResourePlanResponse success, NoSuchObjectException o1, MetaException o2) { @@ -196331,9 +202406,9 @@ public validate_resource_plan_result( /** * Performs a deep copy on other. */ - public validate_resource_plan_result(validate_resource_plan_result other) { + public get_triggers_for_resourceplan_result(get_triggers_for_resourceplan_result other) { if (other.isSetSuccess()) { - this.success = new WMValidateResourcePlanResponse(other.success); + this.success = new WMGetTriggersForResourePlanResponse(other.success); } if (other.isSetO1()) { this.o1 = new NoSuchObjectException(other.o1); @@ -196343,8 +202418,8 @@ public validate_resource_plan_result(validate_resource_plan_result other) { } } - public validate_resource_plan_result deepCopy() { - return new validate_resource_plan_result(this); + public get_triggers_for_resourceplan_result deepCopy() { + return new get_triggers_for_resourceplan_result(this); } @Override @@ -196354,11 +202429,11 @@ public void clear() { this.o2 = null; } - public WMValidateResourcePlanResponse getSuccess() { + public WMGetTriggersForResourePlanResponse getSuccess() { return this.success; } - public void setSuccess(WMValidateResourcePlanResponse success) { + public void setSuccess(WMGetTriggersForResourePlanResponse success) { this.success = success; } @@ -196429,7 +202504,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((WMValidateResourcePlanResponse)value); + setSuccess((WMGetTriggersForResourePlanResponse)value); } break; @@ -196488,12 +202563,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof validate_resource_plan_result) - return this.equals((validate_resource_plan_result)that); + if (that instanceof get_triggers_for_resourceplan_result) + return this.equals((get_triggers_for_resourceplan_result)that); return false; } - public boolean equals(validate_resource_plan_result that) { + public boolean equals(get_triggers_for_resourceplan_result that) { if (that == null) return false; @@ -196550,7 +202625,7 @@ public int hashCode() { } @Override - public int compareTo(validate_resource_plan_result other) { + public int compareTo(get_triggers_for_resourceplan_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -196604,7 +202679,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("validate_resource_plan_result("); + StringBuilder sb = new StringBuilder("get_triggers_for_resourceplan_result("); boolean first = true; sb.append("success:"); @@ -196658,15 +202733,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class validate_resource_plan_resultStandardSchemeFactory implements SchemeFactory { - public validate_resource_plan_resultStandardScheme getScheme() { - return new validate_resource_plan_resultStandardScheme(); + private static class get_triggers_for_resourceplan_resultStandardSchemeFactory implements SchemeFactory { + public get_triggers_for_resourceplan_resultStandardScheme getScheme() { + return new get_triggers_for_resourceplan_resultStandardScheme(); } } - private static class validate_resource_plan_resultStandardScheme extends StandardScheme { + private static class get_triggers_for_resourceplan_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, validate_resource_plan_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_triggers_for_resourceplan_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -196678,7 +202753,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, validate_resource_p switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new WMValidateResourcePlanResponse(); + struct.success = new WMGetTriggersForResourePlanResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -196712,7 +202787,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, validate_resource_p struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, validate_resource_plan_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_triggers_for_resourceplan_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -196737,16 +202812,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, validate_resource_ } - private static class validate_resource_plan_resultTupleSchemeFactory implements SchemeFactory { - public validate_resource_plan_resultTupleScheme getScheme() { - return new validate_resource_plan_resultTupleScheme(); + private static class get_triggers_for_resourceplan_resultTupleSchemeFactory implements SchemeFactory { + public get_triggers_for_resourceplan_resultTupleScheme getScheme() { + return new get_triggers_for_resourceplan_resultTupleScheme(); } } - private static class validate_resource_plan_resultTupleScheme extends TupleScheme { + private static class get_triggers_for_resourceplan_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, validate_resource_plan_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_triggers_for_resourceplan_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -196771,11 +202846,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, validate_resource_p } @Override - public 
void read(org.apache.thrift.protocol.TProtocol prot, validate_resource_plan_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_triggers_for_resourceplan_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.success = new WMValidateResourcePlanResponse(); + struct.success = new WMGetTriggersForResourePlanResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -196794,18 +202869,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, validate_resource_pl } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_resource_plan_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_update_wm_pool_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_or_update_wm_pool_args"); private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new drop_resource_plan_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new drop_resource_plan_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new create_or_update_wm_pool_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new create_or_update_wm_pool_argsTupleSchemeFactory()); } - private WMDropResourcePlanRequest request; // required + private WMCreateOrUpdatePoolRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -196870,16 +202945,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMDropResourcePlanRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateOrUpdatePoolRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_resource_plan_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_or_update_wm_pool_args.class, metaDataMap); } - public drop_resource_plan_args() { + public create_or_update_wm_pool_args() { } - public drop_resource_plan_args( - WMDropResourcePlanRequest request) + public create_or_update_wm_pool_args( + WMCreateOrUpdatePoolRequest request) { this(); this.request = request; @@ -196888,14 +202963,14 @@ public drop_resource_plan_args( /** * Performs a deep copy on other. */ - public drop_resource_plan_args(drop_resource_plan_args other) { + public create_or_update_wm_pool_args(create_or_update_wm_pool_args other) { if (other.isSetRequest()) { - this.request = new WMDropResourcePlanRequest(other.request); + this.request = new WMCreateOrUpdatePoolRequest(other.request); } } - public drop_resource_plan_args deepCopy() { - return new drop_resource_plan_args(this); + public create_or_update_wm_pool_args deepCopy() { + return new create_or_update_wm_pool_args(this); } @Override @@ -196903,11 +202978,11 @@ public void clear() { this.request = null; } - public WMDropResourcePlanRequest getRequest() { + public WMCreateOrUpdatePoolRequest getRequest() { return this.request; } - public void setRequest(WMDropResourcePlanRequest request) { + public void setRequest(WMCreateOrUpdatePoolRequest request) { this.request = request; } @@ -196932,7 +203007,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRequest(); } else { - setRequest((WMDropResourcePlanRequest)value); + setRequest((WMCreateOrUpdatePoolRequest)value); } break; @@ -196965,12 +203040,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_resource_plan_args) - return this.equals((drop_resource_plan_args)that); + if (that instanceof create_or_update_wm_pool_args) + return this.equals((create_or_update_wm_pool_args)that); return false; } - public boolean equals(drop_resource_plan_args that) { + public boolean equals(create_or_update_wm_pool_args that) { if (that == null) return false; @@ -196999,7 +203074,7 @@ public int hashCode() { } @Override - public int compareTo(drop_resource_plan_args other) { + public int compareTo(create_or_update_wm_pool_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -197033,7 +203108,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("drop_resource_plan_args("); + StringBuilder sb = new StringBuilder("create_or_update_wm_pool_args("); boolean first = true; sb.append("request:"); @@ -197071,15 +203146,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class drop_resource_plan_argsStandardSchemeFactory implements SchemeFactory { - public drop_resource_plan_argsStandardScheme getScheme() { - return new drop_resource_plan_argsStandardScheme(); + private static class create_or_update_wm_pool_argsStandardSchemeFactory implements SchemeFactory { + public create_or_update_wm_pool_argsStandardScheme getScheme() { + return new create_or_update_wm_pool_argsStandardScheme(); } } - private static class drop_resource_plan_argsStandardScheme extends StandardScheme { + private static class create_or_update_wm_pool_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, drop_resource_plan_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, create_or_update_wm_pool_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -197091,7 +203166,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_resource_plan_ switch (schemeField.id) { case 1: // REQUEST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.request = new WMDropResourcePlanRequest(); + struct.request = new WMCreateOrUpdatePoolRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } else { @@ -197107,7 +203182,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_resource_plan_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, drop_resource_plan_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, create_or_update_wm_pool_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -197122,16 +203197,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_resource_plan } - private static class drop_resource_plan_argsTupleSchemeFactory implements SchemeFactory { - public drop_resource_plan_argsTupleScheme getScheme() { - return new drop_resource_plan_argsTupleScheme(); + private static class create_or_update_wm_pool_argsTupleSchemeFactory implements SchemeFactory { + public create_or_update_wm_pool_argsTupleScheme getScheme() { + return new create_or_update_wm_pool_argsTupleScheme(); } } - private static class drop_resource_plan_argsTupleScheme extends TupleScheme { + private static class create_or_update_wm_pool_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, create_or_update_wm_pool_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRequest()) { @@ -197144,11 +203219,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_ } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_args struct) throws org.apache.thrift.TException { + public void 
read(org.apache.thrift.protocol.TProtocol prot, create_or_update_wm_pool_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.request = new WMDropResourcePlanRequest(); + struct.request = new WMCreateOrUpdatePoolRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } @@ -197157,31 +203232,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_a } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_resource_plan_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_update_wm_pool_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_or_update_wm_pool_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField O4_FIELD_DESC = new org.apache.thrift.protocol.TField("o4", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new drop_resource_plan_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new drop_resource_plan_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new create_or_update_wm_pool_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new create_or_update_wm_pool_resultTupleSchemeFactory()); } - private WMDropResourcePlanResponse success; // required - private NoSuchObjectException o1; // required - private InvalidOperationException o2; // required - private MetaException o3; // required + private WMCreateOrUpdatePoolResponse success; // required + private AlreadyExistsException o1; // required + private NoSuchObjectException o2; // required + private InvalidObjectException o3; // required + private MetaException o4; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
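// --- Editorial sketch, not part of the generated patch ----------------------------
// create_or_update_wm_pool_args carries a WMCreateOrUpdatePoolRequest, and the result
// struct adds a fourth exception slot (o4 MetaException). How the request is populated
// (pool object, update flag, etc.) is not visible in this hunk, so it is left abstract;
// the treatment of AlreadyExistsException as a no-op below is purely illustrative.
import org.apache.hadoop.hive.metastore.api.*;
import org.apache.thrift.TException;

class CreateOrUpdatePoolSketch {
  static WMCreateOrUpdatePoolResponse apply(ThriftHiveMetastore.Iface client,
      WMCreateOrUpdatePoolRequest request) throws TException {
    try {
      return client.create_or_update_wm_pool(request);
    } catch (AlreadyExistsException e) {   // o1: pool already present on create
      return null;                         // illustrative choice: treat as no-op
    }
    // o2 NoSuchObjectException, o3 InvalidObjectException and o4 MetaException
    // propagate to the caller.
  }
}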
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { SUCCESS((short)0, "success"), O1((short)1, "o1"), O2((short)2, "o2"), - O3((short)3, "o3"); + O3((short)3, "o3"), + O4((short)4, "o4"); private static final Map byName = new HashMap(); @@ -197204,6 +203282,8 @@ public static _Fields findByThriftId(int fieldId) { return O2; case 3: // O3 return O3; + case 4: // O4 + return O4; default: return null; } @@ -197248,53 +203328,60 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMDropResourcePlanResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateOrUpdatePoolResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O4, new org.apache.thrift.meta_data.FieldMetaData("o4", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_resource_plan_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_or_update_wm_pool_result.class, metaDataMap); } - public drop_resource_plan_result() { + public create_or_update_wm_pool_result() { } - public drop_resource_plan_result( - WMDropResourcePlanResponse success, - NoSuchObjectException o1, - InvalidOperationException o2, - MetaException o3) + public create_or_update_wm_pool_result( + WMCreateOrUpdatePoolResponse success, + AlreadyExistsException o1, + NoSuchObjectException o2, + InvalidObjectException o3, + MetaException o4) { this(); this.success = success; this.o1 = o1; this.o2 = o2; this.o3 = o3; + this.o4 = o4; } /** * Performs a deep copy on other. 
*/ - public drop_resource_plan_result(drop_resource_plan_result other) { + public create_or_update_wm_pool_result(create_or_update_wm_pool_result other) { if (other.isSetSuccess()) { - this.success = new WMDropResourcePlanResponse(other.success); + this.success = new WMCreateOrUpdatePoolResponse(other.success); } if (other.isSetO1()) { - this.o1 = new NoSuchObjectException(other.o1); + this.o1 = new AlreadyExistsException(other.o1); } if (other.isSetO2()) { - this.o2 = new InvalidOperationException(other.o2); + this.o2 = new NoSuchObjectException(other.o2); } if (other.isSetO3()) { - this.o3 = new MetaException(other.o3); + this.o3 = new InvalidObjectException(other.o3); + } + if (other.isSetO4()) { + this.o4 = new MetaException(other.o4); } } - public drop_resource_plan_result deepCopy() { - return new drop_resource_plan_result(this); + public create_or_update_wm_pool_result deepCopy() { + return new create_or_update_wm_pool_result(this); } @Override @@ -197303,13 +203390,14 @@ public void clear() { this.o1 = null; this.o2 = null; this.o3 = null; + this.o4 = null; } - public WMDropResourcePlanResponse getSuccess() { + public WMCreateOrUpdatePoolResponse getSuccess() { return this.success; } - public void setSuccess(WMDropResourcePlanResponse success) { + public void setSuccess(WMCreateOrUpdatePoolResponse success) { this.success = success; } @@ -197328,11 +203416,11 @@ public void setSuccessIsSet(boolean value) { } } - public NoSuchObjectException getO1() { + public AlreadyExistsException getO1() { return this.o1; } - public void setO1(NoSuchObjectException o1) { + public void setO1(AlreadyExistsException o1) { this.o1 = o1; } @@ -197351,11 +203439,11 @@ public void setO1IsSet(boolean value) { } } - public InvalidOperationException getO2() { + public NoSuchObjectException getO2() { return this.o2; } - public void setO2(InvalidOperationException o2) { + public void setO2(NoSuchObjectException o2) { this.o2 = o2; } @@ -197374,11 +203462,11 @@ public void setO2IsSet(boolean value) { } } - public MetaException getO3() { + public InvalidObjectException getO3() { return this.o3; } - public void setO3(MetaException o3) { + public void setO3(InvalidObjectException o3) { this.o3 = o3; } @@ -197397,13 +203485,36 @@ public void setO3IsSet(boolean value) { } } + public MetaException getO4() { + return this.o4; + } + + public void setO4(MetaException o4) { + this.o4 = o4; + } + + public void unsetO4() { + this.o4 = null; + } + + /** Returns true if field o4 is set (has been assigned a value) and false otherwise */ + public boolean isSetO4() { + return this.o4 != null; + } + + public void setO4IsSet(boolean value) { + if (!value) { + this.o4 = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case SUCCESS: if (value == null) { unsetSuccess(); } else { - setSuccess((WMDropResourcePlanResponse)value); + setSuccess((WMCreateOrUpdatePoolResponse)value); } break; @@ -197411,7 +203522,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO1(); } else { - setO1((NoSuchObjectException)value); + setO1((AlreadyExistsException)value); } break; @@ -197419,7 +203530,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((InvalidOperationException)value); + setO2((NoSuchObjectException)value); } break; @@ -197427,7 +203538,15 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO3(); } else { - setO3((MetaException)value); + 
setO3((InvalidObjectException)value); + } + break; + + case O4: + if (value == null) { + unsetO4(); + } else { + setO4((MetaException)value); } break; @@ -197448,6 +203567,9 @@ public Object getFieldValue(_Fields field) { case O3: return getO3(); + case O4: + return getO4(); + } throw new IllegalStateException(); } @@ -197467,6 +203589,8 @@ public boolean isSet(_Fields field) { return isSetO2(); case O3: return isSetO3(); + case O4: + return isSetO4(); } throw new IllegalStateException(); } @@ -197475,12 +203599,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_resource_plan_result) - return this.equals((drop_resource_plan_result)that); + if (that instanceof create_or_update_wm_pool_result) + return this.equals((create_or_update_wm_pool_result)that); return false; } - public boolean equals(drop_resource_plan_result that) { + public boolean equals(create_or_update_wm_pool_result that) { if (that == null) return false; @@ -197520,6 +203644,15 @@ public boolean equals(drop_resource_plan_result that) { return false; } + boolean this_present_o4 = true && this.isSetO4(); + boolean that_present_o4 = true && that.isSetO4(); + if (this_present_o4 || that_present_o4) { + if (!(this_present_o4 && that_present_o4)) + return false; + if (!this.o4.equals(that.o4)) + return false; + } + return true; } @@ -197547,11 +203680,16 @@ public int hashCode() { if (present_o3) list.add(o3); + boolean present_o4 = true && (isSetO4()); + list.add(present_o4); + if (present_o4) + list.add(o4); + return list.hashCode(); } @Override - public int compareTo(drop_resource_plan_result other) { + public int compareTo(create_or_update_wm_pool_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -197598,6 +203736,16 @@ public int compareTo(drop_resource_plan_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO4()).compareTo(other.isSetO4()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO4()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o4, other.o4); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -197615,7 +203763,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("drop_resource_plan_result("); + StringBuilder sb = new StringBuilder("create_or_update_wm_pool_result("); boolean first = true; sb.append("success:"); @@ -197649,6 +203797,14 @@ public String toString() { sb.append(this.o3); } first = false; + if (!first) sb.append(", "); + sb.append("o4:"); + if (this.o4 == null) { + sb.append("null"); + } else { + sb.append(this.o4); + } + first = false; sb.append(")"); return sb.toString(); } @@ -197677,15 +203833,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class drop_resource_plan_resultStandardSchemeFactory implements SchemeFactory { - public drop_resource_plan_resultStandardScheme getScheme() { - return new drop_resource_plan_resultStandardScheme(); + private static class create_or_update_wm_pool_resultStandardSchemeFactory implements SchemeFactory { + public create_or_update_wm_pool_resultStandardScheme getScheme() { + return new create_or_update_wm_pool_resultStandardScheme(); } } - private static class drop_resource_plan_resultStandardScheme extends StandardScheme { + private static class create_or_update_wm_pool_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, drop_resource_plan_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, create_or_update_wm_pool_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -197697,7 +203853,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_resource_plan_ switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new WMDropResourcePlanResponse(); + struct.success = new WMCreateOrUpdatePoolResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -197706,7 +203862,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_resource_plan_ break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new NoSuchObjectException(); + struct.o1 = new AlreadyExistsException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { @@ -197715,7 +203871,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_resource_plan_ break; case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new InvalidOperationException(); + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { @@ -197724,13 +203880,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_resource_plan_ break; case 3: // O3 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o3 = new MetaException(); + struct.o3 = new InvalidObjectException(); struct.o3.read(iprot); struct.setO3IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // O4 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o4 = new MetaException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -197740,7 +203905,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, 
drop_resource_plan_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, drop_resource_plan_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, create_or_update_wm_pool_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -197764,22 +203929,27 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_resource_plan struct.o3.write(oprot); oprot.writeFieldEnd(); } + if (struct.o4 != null) { + oprot.writeFieldBegin(O4_FIELD_DESC); + struct.o4.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class drop_resource_plan_resultTupleSchemeFactory implements SchemeFactory { - public drop_resource_plan_resultTupleScheme getScheme() { - return new drop_resource_plan_resultTupleScheme(); + private static class create_or_update_wm_pool_resultTupleSchemeFactory implements SchemeFactory { + public create_or_update_wm_pool_resultTupleScheme getScheme() { + return new create_or_update_wm_pool_resultTupleScheme(); } } - private static class drop_resource_plan_resultTupleScheme extends TupleScheme { + private static class create_or_update_wm_pool_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, create_or_update_wm_pool_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -197794,7 +203964,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_ if (struct.isSetO3()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetO4()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetSuccess()) { struct.success.write(oprot); } @@ -197807,49 +203980,57 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_ if (struct.isSetO3()) { struct.o3.write(oprot); } + if (struct.isSetO4()) { + struct.o4.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, create_or_update_wm_pool_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { - struct.success = new WMDropResourcePlanResponse(); + struct.success = new WMCreateOrUpdatePoolResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o1 = new NoSuchObjectException(); + struct.o1 = new AlreadyExistsException(); struct.o1.read(iprot); struct.setO1IsSet(true); } if (incoming.get(2)) { - struct.o2 = new InvalidOperationException(); + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } if (incoming.get(3)) { - struct.o3 = new MetaException(); + struct.o3 = new InvalidObjectException(); struct.o3.read(iprot); struct.setO3IsSet(true); } + if (incoming.get(4)) { + struct.o4 = new MetaException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public 
@org.apache.hadoop.classification.InterfaceStability.Stable public static class create_wm_trigger_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_wm_trigger_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_pool_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_wm_pool_args"); private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new create_wm_trigger_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new create_wm_trigger_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new drop_wm_pool_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_wm_pool_argsTupleSchemeFactory()); } - private WMCreateTriggerRequest request; // required + private WMDropPoolRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -197914,16 +204095,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateTriggerRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMDropPoolRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_wm_trigger_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_wm_pool_args.class, metaDataMap); } - public create_wm_trigger_args() { + public drop_wm_pool_args() { } - public create_wm_trigger_args( - WMCreateTriggerRequest request) + public drop_wm_pool_args( + WMDropPoolRequest request) { this(); this.request = request; @@ -197932,14 +204113,14 @@ public create_wm_trigger_args( /** * Performs a deep copy on other. 
*/ - public create_wm_trigger_args(create_wm_trigger_args other) { + public drop_wm_pool_args(drop_wm_pool_args other) { if (other.isSetRequest()) { - this.request = new WMCreateTriggerRequest(other.request); + this.request = new WMDropPoolRequest(other.request); } } - public create_wm_trigger_args deepCopy() { - return new create_wm_trigger_args(this); + public drop_wm_pool_args deepCopy() { + return new drop_wm_pool_args(this); } @Override @@ -197947,11 +204128,11 @@ public void clear() { this.request = null; } - public WMCreateTriggerRequest getRequest() { + public WMDropPoolRequest getRequest() { return this.request; } - public void setRequest(WMCreateTriggerRequest request) { + public void setRequest(WMDropPoolRequest request) { this.request = request; } @@ -197976,7 +204157,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRequest(); } else { - setRequest((WMCreateTriggerRequest)value); + setRequest((WMDropPoolRequest)value); } break; @@ -198009,12 +204190,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof create_wm_trigger_args) - return this.equals((create_wm_trigger_args)that); + if (that instanceof drop_wm_pool_args) + return this.equals((drop_wm_pool_args)that); return false; } - public boolean equals(create_wm_trigger_args that) { + public boolean equals(drop_wm_pool_args that) { if (that == null) return false; @@ -198043,7 +204224,7 @@ public int hashCode() { } @Override - public int compareTo(create_wm_trigger_args other) { + public int compareTo(drop_wm_pool_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -198077,7 +204258,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("create_wm_trigger_args("); + StringBuilder sb = new StringBuilder("drop_wm_pool_args("); boolean first = true; sb.append("request:"); @@ -198115,15 +204296,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class create_wm_trigger_argsStandardSchemeFactory implements SchemeFactory { - public create_wm_trigger_argsStandardScheme getScheme() { - return new create_wm_trigger_argsStandardScheme(); + private static class drop_wm_pool_argsStandardSchemeFactory implements SchemeFactory { + public drop_wm_pool_argsStandardScheme getScheme() { + return new drop_wm_pool_argsStandardScheme(); } } - private static class create_wm_trigger_argsStandardScheme extends StandardScheme { + private static class drop_wm_pool_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, create_wm_trigger_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_wm_pool_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -198135,7 +204316,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_wm_trigger_a switch (schemeField.id) { case 1: // REQUEST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.request = new WMCreateTriggerRequest(); + struct.request = new WMDropPoolRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } else { @@ -198151,7 +204332,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_wm_trigger_a struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, create_wm_trigger_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_wm_pool_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -198166,16 +204347,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_wm_trigger_ } - private static class create_wm_trigger_argsTupleSchemeFactory implements SchemeFactory { - public create_wm_trigger_argsTupleScheme getScheme() { - return new create_wm_trigger_argsTupleScheme(); + private static class drop_wm_pool_argsTupleSchemeFactory implements SchemeFactory { + public drop_wm_pool_argsTupleScheme getScheme() { + return new drop_wm_pool_argsTupleScheme(); } } - private static class create_wm_trigger_argsTupleScheme extends TupleScheme { + private static class drop_wm_pool_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, create_wm_trigger_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_wm_pool_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRequest()) { @@ -198188,11 +204369,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_wm_trigger_a } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, create_wm_trigger_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_wm_pool_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if 
(incoming.get(0)) { - struct.request = new WMCreateTriggerRequest(); + struct.request = new WMDropPoolRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } @@ -198201,34 +204382,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_wm_trigger_ar } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_wm_trigger_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_wm_trigger_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_pool_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_wm_pool_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); - private static final org.apache.thrift.protocol.TField O4_FIELD_DESC = new org.apache.thrift.protocol.TField("o4", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new create_wm_trigger_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new create_wm_trigger_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new drop_wm_pool_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_wm_pool_resultTupleSchemeFactory()); } - private WMCreateTriggerResponse success; // required - private AlreadyExistsException o1; // required - private NoSuchObjectException o2; // required - private InvalidObjectException o3; // required - private MetaException o4; // required + private WMDropPoolResponse success; // required + private NoSuchObjectException o1; // required + private InvalidOperationException o2; // required + private MetaException o3; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { SUCCESS((short)0, "success"), O1((short)1, "o1"), O2((short)2, "o2"), - O3((short)3, "o3"), - O4((short)4, "o4"); + O3((short)3, "o3"); private static final Map byName = new HashMap(); @@ -198251,8 +204429,6 @@ public static _Fields findByThriftId(int fieldId) { return O2; case 3: // O3 return O3; - case 4: // O4 - return O4; default: return null; } @@ -198297,60 +204473,53 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateTriggerResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMDropPoolResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); - tmpMap.put(_Fields.O4, new org.apache.thrift.meta_data.FieldMetaData("o4", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_wm_trigger_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_wm_pool_result.class, metaDataMap); } - public create_wm_trigger_result() { + public drop_wm_pool_result() { } - public create_wm_trigger_result( - WMCreateTriggerResponse success, - AlreadyExistsException o1, - NoSuchObjectException o2, - InvalidObjectException o3, - MetaException o4) + public drop_wm_pool_result( + WMDropPoolResponse success, + NoSuchObjectException o1, + InvalidOperationException o2, + MetaException o3) { this(); this.success = success; this.o1 = o1; this.o2 = o2; this.o3 = o3; - this.o4 = o4; } /** * Performs a deep copy on other. 
*/ - public create_wm_trigger_result(create_wm_trigger_result other) { + public drop_wm_pool_result(drop_wm_pool_result other) { if (other.isSetSuccess()) { - this.success = new WMCreateTriggerResponse(other.success); + this.success = new WMDropPoolResponse(other.success); } if (other.isSetO1()) { - this.o1 = new AlreadyExistsException(other.o1); + this.o1 = new NoSuchObjectException(other.o1); } if (other.isSetO2()) { - this.o2 = new NoSuchObjectException(other.o2); + this.o2 = new InvalidOperationException(other.o2); } if (other.isSetO3()) { - this.o3 = new InvalidObjectException(other.o3); - } - if (other.isSetO4()) { - this.o4 = new MetaException(other.o4); + this.o3 = new MetaException(other.o3); } } - public create_wm_trigger_result deepCopy() { - return new create_wm_trigger_result(this); + public drop_wm_pool_result deepCopy() { + return new drop_wm_pool_result(this); } @Override @@ -198359,14 +204528,13 @@ public void clear() { this.o1 = null; this.o2 = null; this.o3 = null; - this.o4 = null; } - public WMCreateTriggerResponse getSuccess() { + public WMDropPoolResponse getSuccess() { return this.success; } - public void setSuccess(WMCreateTriggerResponse success) { + public void setSuccess(WMDropPoolResponse success) { this.success = success; } @@ -198385,11 +204553,11 @@ public void setSuccessIsSet(boolean value) { } } - public AlreadyExistsException getO1() { + public NoSuchObjectException getO1() { return this.o1; } - public void setO1(AlreadyExistsException o1) { + public void setO1(NoSuchObjectException o1) { this.o1 = o1; } @@ -198408,11 +204576,11 @@ public void setO1IsSet(boolean value) { } } - public NoSuchObjectException getO2() { + public InvalidOperationException getO2() { return this.o2; } - public void setO2(NoSuchObjectException o2) { + public void setO2(InvalidOperationException o2) { this.o2 = o2; } @@ -198431,11 +204599,11 @@ public void setO2IsSet(boolean value) { } } - public InvalidObjectException getO3() { + public MetaException getO3() { return this.o3; } - public void setO3(InvalidObjectException o3) { + public void setO3(MetaException o3) { this.o3 = o3; } @@ -198454,36 +204622,13 @@ public void setO3IsSet(boolean value) { } } - public MetaException getO4() { - return this.o4; - } - - public void setO4(MetaException o4) { - this.o4 = o4; - } - - public void unsetO4() { - this.o4 = null; - } - - /** Returns true if field o4 is set (has been assigned a value) and false otherwise */ - public boolean isSetO4() { - return this.o4 != null; - } - - public void setO4IsSet(boolean value) { - if (!value) { - this.o4 = null; - } - } - public void setFieldValue(_Fields field, Object value) { switch (field) { case SUCCESS: if (value == null) { unsetSuccess(); } else { - setSuccess((WMCreateTriggerResponse)value); + setSuccess((WMDropPoolResponse)value); } break; @@ -198491,7 +204636,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO1(); } else { - setO1((AlreadyExistsException)value); + setO1((NoSuchObjectException)value); } break; @@ -198499,7 +204644,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((NoSuchObjectException)value); + setO2((InvalidOperationException)value); } break; @@ -198507,15 +204652,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO3(); } else { - setO3((InvalidObjectException)value); - } - break; - - case O4: - if (value == null) { - unsetO4(); - } else { - setO4((MetaException)value); + 
setO3((MetaException)value); } break; @@ -198536,9 +204673,6 @@ public Object getFieldValue(_Fields field) { case O3: return getO3(); - case O4: - return getO4(); - } throw new IllegalStateException(); } @@ -198558,8 +204692,6 @@ public boolean isSet(_Fields field) { return isSetO2(); case O3: return isSetO3(); - case O4: - return isSetO4(); } throw new IllegalStateException(); } @@ -198568,12 +204700,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof create_wm_trigger_result) - return this.equals((create_wm_trigger_result)that); + if (that instanceof drop_wm_pool_result) + return this.equals((drop_wm_pool_result)that); return false; } - public boolean equals(create_wm_trigger_result that) { + public boolean equals(drop_wm_pool_result that) { if (that == null) return false; @@ -198613,15 +204745,6 @@ public boolean equals(create_wm_trigger_result that) { return false; } - boolean this_present_o4 = true && this.isSetO4(); - boolean that_present_o4 = true && that.isSetO4(); - if (this_present_o4 || that_present_o4) { - if (!(this_present_o4 && that_present_o4)) - return false; - if (!this.o4.equals(that.o4)) - return false; - } - return true; } @@ -198649,16 +204772,11 @@ public int hashCode() { if (present_o3) list.add(o3); - boolean present_o4 = true && (isSetO4()); - list.add(present_o4); - if (present_o4) - list.add(o4); - return list.hashCode(); } @Override - public int compareTo(create_wm_trigger_result other) { + public int compareTo(drop_wm_pool_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -198705,16 +204823,6 @@ public int compareTo(create_wm_trigger_result other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetO4()).compareTo(other.isSetO4()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetO4()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o4, other.o4); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -198732,7 +204840,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("create_wm_trigger_result("); + StringBuilder sb = new StringBuilder("drop_wm_pool_result("); boolean first = true; sb.append("success:"); @@ -198766,14 +204874,6 @@ public String toString() { sb.append(this.o3); } first = false; - if (!first) sb.append(", "); - sb.append("o4:"); - if (this.o4 == null) { - sb.append("null"); - } else { - sb.append(this.o4); - } - first = false; sb.append(")"); return sb.toString(); } @@ -198802,15 +204902,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class create_wm_trigger_resultStandardSchemeFactory implements SchemeFactory { - public create_wm_trigger_resultStandardScheme getScheme() { - return new create_wm_trigger_resultStandardScheme(); + private static class drop_wm_pool_resultStandardSchemeFactory implements SchemeFactory { + public drop_wm_pool_resultStandardScheme getScheme() { + return new drop_wm_pool_resultStandardScheme(); } } - private static class create_wm_trigger_resultStandardScheme extends StandardScheme { + private static class drop_wm_pool_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, create_wm_trigger_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_wm_pool_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -198822,7 +204922,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_wm_trigger_r switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new WMCreateTriggerResponse(); + struct.success = new WMDropPoolResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -198831,7 +204931,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_wm_trigger_r break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new AlreadyExistsException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { @@ -198840,7 +204940,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_wm_trigger_r break; case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new NoSuchObjectException(); + struct.o2 = new InvalidOperationException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { @@ -198849,22 +204949,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_wm_trigger_r break; case 3: // O3 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o3 = new InvalidObjectException(); + struct.o3 = new MetaException(); struct.o3.read(iprot); struct.setO3IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // O4 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o4 = new MetaException(); - struct.o4.read(iprot); - struct.setO4IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -198874,7 +204965,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_wm_trigger_r struct.validate(); } - public void 
write(org.apache.thrift.protocol.TProtocol oprot, create_wm_trigger_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_wm_pool_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -198898,27 +204989,22 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_wm_trigger_ struct.o3.write(oprot); oprot.writeFieldEnd(); } - if (struct.o4 != null) { - oprot.writeFieldBegin(O4_FIELD_DESC); - struct.o4.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class create_wm_trigger_resultTupleSchemeFactory implements SchemeFactory { - public create_wm_trigger_resultTupleScheme getScheme() { - return new create_wm_trigger_resultTupleScheme(); + private static class drop_wm_pool_resultTupleSchemeFactory implements SchemeFactory { + public drop_wm_pool_resultTupleScheme getScheme() { + return new drop_wm_pool_resultTupleScheme(); } } - private static class create_wm_trigger_resultTupleScheme extends TupleScheme { + private static class drop_wm_pool_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, create_wm_trigger_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_wm_pool_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -198933,10 +205019,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_wm_trigger_r if (struct.isSetO3()) { optionals.set(3); } - if (struct.isSetO4()) { - optionals.set(4); - } - oprot.writeBitSet(optionals, 5); + oprot.writeBitSet(optionals, 4); if (struct.isSetSuccess()) { struct.success.write(oprot); } @@ -198949,57 +205032,49 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_wm_trigger_r if (struct.isSetO3()) { struct.o3.write(oprot); } - if (struct.isSetO4()) { - struct.o4.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, create_wm_trigger_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_wm_pool_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { - struct.success = new WMCreateTriggerResponse(); + struct.success = new WMDropPoolResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o1 = new AlreadyExistsException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } if (incoming.get(2)) { - struct.o2 = new NoSuchObjectException(); + struct.o2 = new InvalidOperationException(); struct.o2.read(iprot); struct.setO2IsSet(true); } if (incoming.get(3)) { - struct.o3 = new InvalidObjectException(); + struct.o3 = new MetaException(); struct.o3.read(iprot); struct.setO3IsSet(true); } - if (incoming.get(4)) { - struct.o4 = new MetaException(); - struct.o4.read(iprot); - struct.setO4IsSet(true); - } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_wm_trigger_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private 
static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_wm_trigger_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_update_wm_mapping_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_or_update_wm_mapping_args"); private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new alter_wm_trigger_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new alter_wm_trigger_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new create_or_update_wm_mapping_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new create_or_update_wm_mapping_argsTupleSchemeFactory()); } - private WMAlterTriggerRequest request; // required + private WMCreateOrUpdateMappingRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -199064,16 +205139,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMAlterTriggerRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateOrUpdateMappingRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_wm_trigger_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_or_update_wm_mapping_args.class, metaDataMap); } - public alter_wm_trigger_args() { + public create_or_update_wm_mapping_args() { } - public alter_wm_trigger_args( - WMAlterTriggerRequest request) + public create_or_update_wm_mapping_args( + WMCreateOrUpdateMappingRequest request) { this(); this.request = request; @@ -199082,14 +205157,14 @@ public alter_wm_trigger_args( /** * Performs a deep copy on other. 
*/ - public alter_wm_trigger_args(alter_wm_trigger_args other) { + public create_or_update_wm_mapping_args(create_or_update_wm_mapping_args other) { if (other.isSetRequest()) { - this.request = new WMAlterTriggerRequest(other.request); + this.request = new WMCreateOrUpdateMappingRequest(other.request); } } - public alter_wm_trigger_args deepCopy() { - return new alter_wm_trigger_args(this); + public create_or_update_wm_mapping_args deepCopy() { + return new create_or_update_wm_mapping_args(this); } @Override @@ -199097,11 +205172,11 @@ public void clear() { this.request = null; } - public WMAlterTriggerRequest getRequest() { + public WMCreateOrUpdateMappingRequest getRequest() { return this.request; } - public void setRequest(WMAlterTriggerRequest request) { + public void setRequest(WMCreateOrUpdateMappingRequest request) { this.request = request; } @@ -199126,7 +205201,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRequest(); } else { - setRequest((WMAlterTriggerRequest)value); + setRequest((WMCreateOrUpdateMappingRequest)value); } break; @@ -199159,12 +205234,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof alter_wm_trigger_args) - return this.equals((alter_wm_trigger_args)that); + if (that instanceof create_or_update_wm_mapping_args) + return this.equals((create_or_update_wm_mapping_args)that); return false; } - public boolean equals(alter_wm_trigger_args that) { + public boolean equals(create_or_update_wm_mapping_args that) { if (that == null) return false; @@ -199193,7 +205268,7 @@ public int hashCode() { } @Override - public int compareTo(alter_wm_trigger_args other) { + public int compareTo(create_or_update_wm_mapping_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -199227,7 +205302,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("alter_wm_trigger_args("); + StringBuilder sb = new StringBuilder("create_or_update_wm_mapping_args("); boolean first = true; sb.append("request:"); @@ -199265,15 +205340,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class alter_wm_trigger_argsStandardSchemeFactory implements SchemeFactory { - public alter_wm_trigger_argsStandardScheme getScheme() { - return new alter_wm_trigger_argsStandardScheme(); + private static class create_or_update_wm_mapping_argsStandardSchemeFactory implements SchemeFactory { + public create_or_update_wm_mapping_argsStandardScheme getScheme() { + return new create_or_update_wm_mapping_argsStandardScheme(); } } - private static class alter_wm_trigger_argsStandardScheme extends StandardScheme { + private static class create_or_update_wm_mapping_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, alter_wm_trigger_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, create_or_update_wm_mapping_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -199285,7 +205360,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_wm_trigger_ar switch (schemeField.id) { case 1: // REQUEST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.request = new WMAlterTriggerRequest(); + struct.request = new WMCreateOrUpdateMappingRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } else { @@ -199301,7 +205376,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_wm_trigger_ar struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, alter_wm_trigger_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, create_or_update_wm_mapping_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -199316,16 +205391,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_wm_trigger_a } - private static class alter_wm_trigger_argsTupleSchemeFactory implements SchemeFactory { - public alter_wm_trigger_argsTupleScheme getScheme() { - return new alter_wm_trigger_argsTupleScheme(); + private static class create_or_update_wm_mapping_argsTupleSchemeFactory implements SchemeFactory { + public create_or_update_wm_mapping_argsTupleScheme getScheme() { + return new create_or_update_wm_mapping_argsTupleScheme(); } } - private static class alter_wm_trigger_argsTupleScheme extends TupleScheme { + private static class create_or_update_wm_mapping_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, alter_wm_trigger_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, create_or_update_wm_mapping_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRequest()) { @@ -199338,11 +205413,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_wm_trigger_ar } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, alter_wm_trigger_args struct) throws org.apache.thrift.TException { + public void 
read(org.apache.thrift.protocol.TProtocol prot, create_or_update_wm_mapping_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.request = new WMAlterTriggerRequest(); + struct.request = new WMCreateOrUpdateMappingRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } @@ -199351,31 +205426,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_wm_trigger_arg } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_wm_trigger_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_wm_trigger_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_update_wm_mapping_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_or_update_wm_mapping_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField O4_FIELD_DESC = new org.apache.thrift.protocol.TField("o4", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new alter_wm_trigger_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new alter_wm_trigger_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new create_or_update_wm_mapping_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new create_or_update_wm_mapping_resultTupleSchemeFactory()); } - private WMAlterTriggerResponse success; // required - private NoSuchObjectException o1; // required - private InvalidObjectException o2; // required - private MetaException o3; // required + private WMCreateOrUpdateMappingResponse success; // required + private AlreadyExistsException o1; // required + private NoSuchObjectException o2; // required + private InvalidObjectException o3; // required + private MetaException o4; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { SUCCESS((short)0, "success"), O1((short)1, "o1"), O2((short)2, "o2"), - O3((short)3, "o3"); + O3((short)3, "o3"), + O4((short)4, "o4"); private static final Map byName = new HashMap(); @@ -199398,6 +205476,8 @@ public static _Fields findByThriftId(int fieldId) { return O2; case 3: // O3 return O3; + case 4: // O4 + return O4; default: return null; } @@ -199442,53 +205522,60 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMAlterTriggerResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateOrUpdateMappingResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O4, new org.apache.thrift.meta_data.FieldMetaData("o4", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_wm_trigger_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_or_update_wm_mapping_result.class, metaDataMap); } - public alter_wm_trigger_result() { + public create_or_update_wm_mapping_result() { } - public alter_wm_trigger_result( - WMAlterTriggerResponse success, - NoSuchObjectException o1, - InvalidObjectException o2, - MetaException o3) + public create_or_update_wm_mapping_result( + WMCreateOrUpdateMappingResponse success, + AlreadyExistsException o1, + NoSuchObjectException o2, + InvalidObjectException o3, + MetaException o4) { this(); this.success = success; this.o1 = o1; this.o2 = o2; this.o3 = o3; + this.o4 = o4; } /** * Performs a deep copy on other. 
*/ - public alter_wm_trigger_result(alter_wm_trigger_result other) { + public create_or_update_wm_mapping_result(create_or_update_wm_mapping_result other) { if (other.isSetSuccess()) { - this.success = new WMAlterTriggerResponse(other.success); + this.success = new WMCreateOrUpdateMappingResponse(other.success); } if (other.isSetO1()) { - this.o1 = new NoSuchObjectException(other.o1); + this.o1 = new AlreadyExistsException(other.o1); } if (other.isSetO2()) { - this.o2 = new InvalidObjectException(other.o2); + this.o2 = new NoSuchObjectException(other.o2); } if (other.isSetO3()) { - this.o3 = new MetaException(other.o3); + this.o3 = new InvalidObjectException(other.o3); + } + if (other.isSetO4()) { + this.o4 = new MetaException(other.o4); } } - public alter_wm_trigger_result deepCopy() { - return new alter_wm_trigger_result(this); + public create_or_update_wm_mapping_result deepCopy() { + return new create_or_update_wm_mapping_result(this); } @Override @@ -199497,13 +205584,14 @@ public void clear() { this.o1 = null; this.o2 = null; this.o3 = null; + this.o4 = null; } - public WMAlterTriggerResponse getSuccess() { + public WMCreateOrUpdateMappingResponse getSuccess() { return this.success; } - public void setSuccess(WMAlterTriggerResponse success) { + public void setSuccess(WMCreateOrUpdateMappingResponse success) { this.success = success; } @@ -199522,11 +205610,11 @@ public void setSuccessIsSet(boolean value) { } } - public NoSuchObjectException getO1() { + public AlreadyExistsException getO1() { return this.o1; } - public void setO1(NoSuchObjectException o1) { + public void setO1(AlreadyExistsException o1) { this.o1 = o1; } @@ -199545,11 +205633,11 @@ public void setO1IsSet(boolean value) { } } - public InvalidObjectException getO2() { + public NoSuchObjectException getO2() { return this.o2; } - public void setO2(InvalidObjectException o2) { + public void setO2(NoSuchObjectException o2) { this.o2 = o2; } @@ -199568,11 +205656,11 @@ public void setO2IsSet(boolean value) { } } - public MetaException getO3() { + public InvalidObjectException getO3() { return this.o3; } - public void setO3(MetaException o3) { + public void setO3(InvalidObjectException o3) { this.o3 = o3; } @@ -199591,13 +205679,36 @@ public void setO3IsSet(boolean value) { } } + public MetaException getO4() { + return this.o4; + } + + public void setO4(MetaException o4) { + this.o4 = o4; + } + + public void unsetO4() { + this.o4 = null; + } + + /** Returns true if field o4 is set (has been assigned a value) and false otherwise */ + public boolean isSetO4() { + return this.o4 != null; + } + + public void setO4IsSet(boolean value) { + if (!value) { + this.o4 = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case SUCCESS: if (value == null) { unsetSuccess(); } else { - setSuccess((WMAlterTriggerResponse)value); + setSuccess((WMCreateOrUpdateMappingResponse)value); } break; @@ -199605,7 +205716,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO1(); } else { - setO1((NoSuchObjectException)value); + setO1((AlreadyExistsException)value); } break; @@ -199613,7 +205724,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((InvalidObjectException)value); + setO2((NoSuchObjectException)value); } break; @@ -199621,7 +205732,15 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO3(); } else { - setO3((MetaException)value); + setO3((InvalidObjectException)value); + } + 
break; + + case O4: + if (value == null) { + unsetO4(); + } else { + setO4((MetaException)value); } break; @@ -199642,6 +205761,9 @@ public Object getFieldValue(_Fields field) { case O3: return getO3(); + case O4: + return getO4(); + } throw new IllegalStateException(); } @@ -199661,6 +205783,8 @@ public boolean isSet(_Fields field) { return isSetO2(); case O3: return isSetO3(); + case O4: + return isSetO4(); } throw new IllegalStateException(); } @@ -199669,12 +205793,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof alter_wm_trigger_result) - return this.equals((alter_wm_trigger_result)that); + if (that instanceof create_or_update_wm_mapping_result) + return this.equals((create_or_update_wm_mapping_result)that); return false; } - public boolean equals(alter_wm_trigger_result that) { + public boolean equals(create_or_update_wm_mapping_result that) { if (that == null) return false; @@ -199714,6 +205838,15 @@ public boolean equals(alter_wm_trigger_result that) { return false; } + boolean this_present_o4 = true && this.isSetO4(); + boolean that_present_o4 = true && that.isSetO4(); + if (this_present_o4 || that_present_o4) { + if (!(this_present_o4 && that_present_o4)) + return false; + if (!this.o4.equals(that.o4)) + return false; + } + return true; } @@ -199741,11 +205874,16 @@ public int hashCode() { if (present_o3) list.add(o3); + boolean present_o4 = true && (isSetO4()); + list.add(present_o4); + if (present_o4) + list.add(o4); + return list.hashCode(); } @Override - public int compareTo(alter_wm_trigger_result other) { + public int compareTo(create_or_update_wm_mapping_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -199792,6 +205930,16 @@ public int compareTo(alter_wm_trigger_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO4()).compareTo(other.isSetO4()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO4()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o4, other.o4); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -199809,7 +205957,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("alter_wm_trigger_result("); + StringBuilder sb = new StringBuilder("create_or_update_wm_mapping_result("); boolean first = true; sb.append("success:"); @@ -199843,6 +205991,14 @@ public String toString() { sb.append(this.o3); } first = false; + if (!first) sb.append(", "); + sb.append("o4:"); + if (this.o4 == null) { + sb.append("null"); + } else { + sb.append(this.o4); + } + first = false; sb.append(")"); return sb.toString(); } @@ -199871,15 +206027,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class alter_wm_trigger_resultStandardSchemeFactory implements SchemeFactory { - public alter_wm_trigger_resultStandardScheme getScheme() { - return new alter_wm_trigger_resultStandardScheme(); + private static class create_or_update_wm_mapping_resultStandardSchemeFactory implements SchemeFactory { + public create_or_update_wm_mapping_resultStandardScheme getScheme() { + return new create_or_update_wm_mapping_resultStandardScheme(); } } - private static class alter_wm_trigger_resultStandardScheme extends StandardScheme { + private static class create_or_update_wm_mapping_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, alter_wm_trigger_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, create_or_update_wm_mapping_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -199891,7 +206047,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_wm_trigger_re switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new WMAlterTriggerResponse(); + struct.success = new WMCreateOrUpdateMappingResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -199900,7 +206056,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_wm_trigger_re break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new NoSuchObjectException(); + struct.o1 = new AlreadyExistsException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { @@ -199909,7 +206065,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_wm_trigger_re break; case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new InvalidObjectException(); + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { @@ -199918,13 +206074,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_wm_trigger_re break; case 3: // O3 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o3 = new MetaException(); + struct.o3 = new InvalidObjectException(); struct.o3.read(iprot); struct.setO3IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // O4 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o4 = new MetaException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -199934,7 +206099,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, 
alter_wm_trigger_re struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, alter_wm_trigger_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, create_or_update_wm_mapping_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -199958,22 +206123,27 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_wm_trigger_r struct.o3.write(oprot); oprot.writeFieldEnd(); } + if (struct.o4 != null) { + oprot.writeFieldBegin(O4_FIELD_DESC); + struct.o4.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class alter_wm_trigger_resultTupleSchemeFactory implements SchemeFactory { - public alter_wm_trigger_resultTupleScheme getScheme() { - return new alter_wm_trigger_resultTupleScheme(); + private static class create_or_update_wm_mapping_resultTupleSchemeFactory implements SchemeFactory { + public create_or_update_wm_mapping_resultTupleScheme getScheme() { + return new create_or_update_wm_mapping_resultTupleScheme(); } } - private static class alter_wm_trigger_resultTupleScheme extends TupleScheme { + private static class create_or_update_wm_mapping_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, alter_wm_trigger_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, create_or_update_wm_mapping_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -199988,7 +206158,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_wm_trigger_re if (struct.isSetO3()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetO4()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetSuccess()) { struct.success.write(oprot); } @@ -200001,49 +206174,57 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_wm_trigger_re if (struct.isSetO3()) { struct.o3.write(oprot); } + if (struct.isSetO4()) { + struct.o4.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, alter_wm_trigger_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, create_or_update_wm_mapping_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { - struct.success = new WMAlterTriggerResponse(); + struct.success = new WMCreateOrUpdateMappingResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o1 = new NoSuchObjectException(); + struct.o1 = new AlreadyExistsException(); struct.o1.read(iprot); struct.setO1IsSet(true); } if (incoming.get(2)) { - struct.o2 = new InvalidObjectException(); + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } if (incoming.get(3)) { - struct.o3 = new MetaException(); + struct.o3 = new InvalidObjectException(); struct.o3.read(iprot); struct.setO3IsSet(true); } + if (incoming.get(4)) { + struct.o4 = new MetaException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public 
@org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_trigger_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_wm_trigger_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_mapping_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_wm_mapping_args"); private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new drop_wm_trigger_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new drop_wm_trigger_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new drop_wm_mapping_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_wm_mapping_argsTupleSchemeFactory()); } - private WMDropTriggerRequest request; // required + private WMDropMappingRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -200108,16 +206289,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMDropTriggerRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMDropMappingRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_wm_trigger_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_wm_mapping_args.class, metaDataMap); } - public drop_wm_trigger_args() { + public drop_wm_mapping_args() { } - public drop_wm_trigger_args( - WMDropTriggerRequest request) + public drop_wm_mapping_args( + WMDropMappingRequest request) { this(); this.request = request; @@ -200126,14 +206307,14 @@ public drop_wm_trigger_args( /** * Performs a deep copy on other. 
*/ - public drop_wm_trigger_args(drop_wm_trigger_args other) { + public drop_wm_mapping_args(drop_wm_mapping_args other) { if (other.isSetRequest()) { - this.request = new WMDropTriggerRequest(other.request); + this.request = new WMDropMappingRequest(other.request); } } - public drop_wm_trigger_args deepCopy() { - return new drop_wm_trigger_args(this); + public drop_wm_mapping_args deepCopy() { + return new drop_wm_mapping_args(this); } @Override @@ -200141,11 +206322,11 @@ public void clear() { this.request = null; } - public WMDropTriggerRequest getRequest() { + public WMDropMappingRequest getRequest() { return this.request; } - public void setRequest(WMDropTriggerRequest request) { + public void setRequest(WMDropMappingRequest request) { this.request = request; } @@ -200170,7 +206351,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRequest(); } else { - setRequest((WMDropTriggerRequest)value); + setRequest((WMDropMappingRequest)value); } break; @@ -200203,12 +206384,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_wm_trigger_args) - return this.equals((drop_wm_trigger_args)that); + if (that instanceof drop_wm_mapping_args) + return this.equals((drop_wm_mapping_args)that); return false; } - public boolean equals(drop_wm_trigger_args that) { + public boolean equals(drop_wm_mapping_args that) { if (that == null) return false; @@ -200237,7 +206418,7 @@ public int hashCode() { } @Override - public int compareTo(drop_wm_trigger_args other) { + public int compareTo(drop_wm_mapping_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -200271,7 +206452,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("drop_wm_trigger_args("); + StringBuilder sb = new StringBuilder("drop_wm_mapping_args("); boolean first = true; sb.append("request:"); @@ -200309,15 +206490,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class drop_wm_trigger_argsStandardSchemeFactory implements SchemeFactory { - public drop_wm_trigger_argsStandardScheme getScheme() { - return new drop_wm_trigger_argsStandardScheme(); + private static class drop_wm_mapping_argsStandardSchemeFactory implements SchemeFactory { + public drop_wm_mapping_argsStandardScheme getScheme() { + return new drop_wm_mapping_argsStandardScheme(); } } - private static class drop_wm_trigger_argsStandardScheme extends StandardScheme { + private static class drop_wm_mapping_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, drop_wm_trigger_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_wm_mapping_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -200329,7 +206510,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_wm_trigger_arg switch (schemeField.id) { case 1: // REQUEST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.request = new WMDropTriggerRequest(); + struct.request = new WMDropMappingRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } else { @@ -200345,7 +206526,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_wm_trigger_arg struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, drop_wm_trigger_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_wm_mapping_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -200360,16 +206541,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_wm_trigger_ar } - private static class drop_wm_trigger_argsTupleSchemeFactory implements SchemeFactory { - public drop_wm_trigger_argsTupleScheme getScheme() { - return new drop_wm_trigger_argsTupleScheme(); + private static class drop_wm_mapping_argsTupleSchemeFactory implements SchemeFactory { + public drop_wm_mapping_argsTupleScheme getScheme() { + return new drop_wm_mapping_argsTupleScheme(); } } - private static class drop_wm_trigger_argsTupleScheme extends TupleScheme { + private static class drop_wm_mapping_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, drop_wm_trigger_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_wm_mapping_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRequest()) { @@ -200382,11 +206563,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_wm_trigger_arg } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, drop_wm_trigger_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_wm_mapping_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = 
iprot.readBitSet(1); if (incoming.get(0)) { - struct.request = new WMDropTriggerRequest(); + struct.request = new WMDropMappingRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } @@ -200395,8 +206576,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_wm_trigger_args } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_trigger_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_wm_trigger_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_wm_mapping_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_wm_mapping_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); @@ -200405,11 +206586,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_wm_trigger_args private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new drop_wm_trigger_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new drop_wm_trigger_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new drop_wm_mapping_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_wm_mapping_resultTupleSchemeFactory()); } - private WMDropTriggerResponse success; // required + private WMDropMappingResponse success; // required private NoSuchObjectException o1; // required private InvalidOperationException o2; // required private MetaException o3; // required @@ -200486,7 +206667,7 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMDropTriggerResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMDropMappingResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -200494,14 +206675,14 @@ public String getFieldName() { tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_wm_trigger_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_wm_mapping_result.class, metaDataMap); } - public drop_wm_trigger_result() { + public drop_wm_mapping_result() { } - public drop_wm_trigger_result( - WMDropTriggerResponse success, + public drop_wm_mapping_result( + WMDropMappingResponse success, NoSuchObjectException o1, InvalidOperationException o2, MetaException o3) @@ -200516,9 +206697,9 @@ public drop_wm_trigger_result( /** * Performs a deep copy on other. */ - public drop_wm_trigger_result(drop_wm_trigger_result other) { + public drop_wm_mapping_result(drop_wm_mapping_result other) { if (other.isSetSuccess()) { - this.success = new WMDropTriggerResponse(other.success); + this.success = new WMDropMappingResponse(other.success); } if (other.isSetO1()) { this.o1 = new NoSuchObjectException(other.o1); @@ -200531,8 +206712,8 @@ public drop_wm_trigger_result(drop_wm_trigger_result other) { } } - public drop_wm_trigger_result deepCopy() { - return new drop_wm_trigger_result(this); + public drop_wm_mapping_result deepCopy() { + return new drop_wm_mapping_result(this); } @Override @@ -200543,11 +206724,11 @@ public void clear() { this.o3 = null; } - public WMDropTriggerResponse getSuccess() { + public WMDropMappingResponse getSuccess() { return this.success; } - public void setSuccess(WMDropTriggerResponse success) { + public void setSuccess(WMDropMappingResponse success) { this.success = success; } @@ -200641,7 +206822,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((WMDropTriggerResponse)value); + setSuccess((WMDropMappingResponse)value); } break; @@ -200713,12 +206894,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_wm_trigger_result) - return this.equals((drop_wm_trigger_result)that); + if (that instanceof drop_wm_mapping_result) + return this.equals((drop_wm_mapping_result)that); return false; } - public boolean equals(drop_wm_trigger_result that) { + public boolean equals(drop_wm_mapping_result that) { if (that == null) return false; @@ -200789,7 +206970,7 @@ public int hashCode() { } @Override - public int compareTo(drop_wm_trigger_result other) { + public int compareTo(drop_wm_mapping_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -200853,7 +207034,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("drop_wm_trigger_result("); + StringBuilder sb = new StringBuilder("drop_wm_mapping_result("); boolean first = true; sb.append("success:"); @@ -200915,15 +207096,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class drop_wm_trigger_resultStandardSchemeFactory implements SchemeFactory { - public drop_wm_trigger_resultStandardScheme getScheme() { - return new drop_wm_trigger_resultStandardScheme(); + private static class drop_wm_mapping_resultStandardSchemeFactory implements SchemeFactory { + public drop_wm_mapping_resultStandardScheme getScheme() { + return new drop_wm_mapping_resultStandardScheme(); } } - private static class drop_wm_trigger_resultStandardScheme extends StandardScheme { + private static class drop_wm_mapping_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, drop_wm_trigger_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_wm_mapping_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -200935,7 +207116,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_wm_trigger_res switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new WMDropTriggerResponse(); + struct.success = new WMDropMappingResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -200978,7 +207159,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_wm_trigger_res struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, drop_wm_trigger_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_wm_mapping_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -201008,16 +207189,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_wm_trigger_re } - private static class drop_wm_trigger_resultTupleSchemeFactory implements SchemeFactory { - public drop_wm_trigger_resultTupleScheme getScheme() { - return new drop_wm_trigger_resultTupleScheme(); + private static class drop_wm_mapping_resultTupleSchemeFactory implements SchemeFactory { + public drop_wm_mapping_resultTupleScheme getScheme() { + return new drop_wm_mapping_resultTupleScheme(); } } - private static class drop_wm_trigger_resultTupleScheme extends TupleScheme { + private static class drop_wm_mapping_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, drop_wm_trigger_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_wm_mapping_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -201048,11 +207229,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_wm_trigger_res } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, drop_wm_trigger_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_wm_mapping_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = 
(TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { - struct.success = new WMDropTriggerResponse(); + struct.success = new WMDropMappingResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -201076,18 +207257,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_wm_trigger_resu } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_triggers_for_resourceplan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_triggers_for_resourceplan_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_drop_wm_trigger_to_pool_mapping_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_or_drop_wm_trigger_to_pool_mapping_args"); private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_triggers_for_resourceplan_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_triggers_for_resourceplan_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new create_or_drop_wm_trigger_to_pool_mapping_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new create_or_drop_wm_trigger_to_pool_mapping_argsTupleSchemeFactory()); } - private WMGetTriggersForResourePlanRequest request; // required + private WMCreateOrDropTriggerToPoolMappingRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -201152,16 +207333,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetTriggersForResourePlanRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateOrDropTriggerToPoolMappingRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_triggers_for_resourceplan_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_or_drop_wm_trigger_to_pool_mapping_args.class, metaDataMap); } - public get_triggers_for_resourceplan_args() { + public create_or_drop_wm_trigger_to_pool_mapping_args() { } - public get_triggers_for_resourceplan_args( - WMGetTriggersForResourePlanRequest request) + public create_or_drop_wm_trigger_to_pool_mapping_args( + WMCreateOrDropTriggerToPoolMappingRequest request) { this(); this.request = request; @@ -201170,14 +207351,14 @@ public get_triggers_for_resourceplan_args( /** * Performs a deep copy on other. */ - public get_triggers_for_resourceplan_args(get_triggers_for_resourceplan_args other) { + public create_or_drop_wm_trigger_to_pool_mapping_args(create_or_drop_wm_trigger_to_pool_mapping_args other) { if (other.isSetRequest()) { - this.request = new WMGetTriggersForResourePlanRequest(other.request); + this.request = new WMCreateOrDropTriggerToPoolMappingRequest(other.request); } } - public get_triggers_for_resourceplan_args deepCopy() { - return new get_triggers_for_resourceplan_args(this); + public create_or_drop_wm_trigger_to_pool_mapping_args deepCopy() { + return new create_or_drop_wm_trigger_to_pool_mapping_args(this); } @Override @@ -201185,11 +207366,11 @@ public void clear() { this.request = null; } - public WMGetTriggersForResourePlanRequest getRequest() { + public WMCreateOrDropTriggerToPoolMappingRequest getRequest() { return this.request; } - public void setRequest(WMGetTriggersForResourePlanRequest request) { + public void setRequest(WMCreateOrDropTriggerToPoolMappingRequest request) { this.request = request; } @@ -201214,7 +207395,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRequest(); } else { - setRequest((WMGetTriggersForResourePlanRequest)value); + setRequest((WMCreateOrDropTriggerToPoolMappingRequest)value); } break; @@ -201247,12 +207428,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_triggers_for_resourceplan_args) - return this.equals((get_triggers_for_resourceplan_args)that); + if (that instanceof create_or_drop_wm_trigger_to_pool_mapping_args) + return this.equals((create_or_drop_wm_trigger_to_pool_mapping_args)that); return false; } - public boolean equals(get_triggers_for_resourceplan_args that) { + public boolean equals(create_or_drop_wm_trigger_to_pool_mapping_args that) { if (that == null) return false; @@ -201281,7 +207462,7 @@ public int hashCode() { } @Override - public int compareTo(get_triggers_for_resourceplan_args other) { + public int compareTo(create_or_drop_wm_trigger_to_pool_mapping_args other) 
{ if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -201315,7 +207496,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_triggers_for_resourceplan_args("); + StringBuilder sb = new StringBuilder("create_or_drop_wm_trigger_to_pool_mapping_args("); boolean first = true; sb.append("request:"); @@ -201353,15 +207534,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_triggers_for_resourceplan_argsStandardSchemeFactory implements SchemeFactory { - public get_triggers_for_resourceplan_argsStandardScheme getScheme() { - return new get_triggers_for_resourceplan_argsStandardScheme(); + private static class create_or_drop_wm_trigger_to_pool_mapping_argsStandardSchemeFactory implements SchemeFactory { + public create_or_drop_wm_trigger_to_pool_mapping_argsStandardScheme getScheme() { + return new create_or_drop_wm_trigger_to_pool_mapping_argsStandardScheme(); } } - private static class get_triggers_for_resourceplan_argsStandardScheme extends StandardScheme { + private static class create_or_drop_wm_trigger_to_pool_mapping_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_triggers_for_resourceplan_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, create_or_drop_wm_trigger_to_pool_mapping_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -201373,7 +207554,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_triggers_for_re switch (schemeField.id) { case 1: // REQUEST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.request = new WMGetTriggersForResourePlanRequest(); + struct.request = new WMCreateOrDropTriggerToPoolMappingRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } else { @@ -201389,7 +207570,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_triggers_for_re struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_triggers_for_resourceplan_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, create_or_drop_wm_trigger_to_pool_mapping_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -201404,16 +207585,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_triggers_for_r } - private static class get_triggers_for_resourceplan_argsTupleSchemeFactory implements SchemeFactory { - public get_triggers_for_resourceplan_argsTupleScheme getScheme() { - return new get_triggers_for_resourceplan_argsTupleScheme(); + private static class create_or_drop_wm_trigger_to_pool_mapping_argsTupleSchemeFactory implements SchemeFactory { + public create_or_drop_wm_trigger_to_pool_mapping_argsTupleScheme getScheme() { + return new create_or_drop_wm_trigger_to_pool_mapping_argsTupleScheme(); } } - private static class get_triggers_for_resourceplan_argsTupleScheme extends TupleScheme { + private static class create_or_drop_wm_trigger_to_pool_mapping_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_triggers_for_resourceplan_args struct) throws 
org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, create_or_drop_wm_trigger_to_pool_mapping_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRequest()) { @@ -201426,11 +207607,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_triggers_for_re } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_triggers_for_resourceplan_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, create_or_drop_wm_trigger_to_pool_mapping_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.request = new WMGetTriggersForResourePlanRequest(); + struct.request = new WMCreateOrDropTriggerToPoolMappingRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } @@ -201439,28 +207620,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_triggers_for_res } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_triggers_for_resourceplan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_triggers_for_resourceplan_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_or_drop_wm_trigger_to_pool_mapping_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_or_drop_wm_trigger_to_pool_mapping_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField O4_FIELD_DESC = new org.apache.thrift.protocol.TField("o4", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_triggers_for_resourceplan_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_triggers_for_resourceplan_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new create_or_drop_wm_trigger_to_pool_mapping_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new create_or_drop_wm_trigger_to_pool_mapping_resultTupleSchemeFactory()); } - private WMGetTriggersForResourePlanResponse success; // required - private NoSuchObjectException o1; // required - private MetaException o2; // required + private WMCreateOrDropTriggerToPoolMappingResponse success; // 
required + private AlreadyExistsException o1; // required + private NoSuchObjectException o2; // required + private InvalidObjectException o3; // required + private MetaException o4; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { SUCCESS((short)0, "success"), O1((short)1, "o1"), - O2((short)2, "o2"); + O2((short)2, "o2"), + O3((short)3, "o3"), + O4((short)4, "o4"); private static final Map byName = new HashMap(); @@ -201481,6 +207668,10 @@ public static _Fields findByThriftId(int fieldId) { return O1; case 2: // O2 return O2; + case 3: // O3 + return O3; + case 4: // O4 + return O4; default: return null; } @@ -201525,46 +207716,60 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMGetTriggersForResourePlanResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMCreateOrDropTriggerToPoolMappingResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O4, new org.apache.thrift.meta_data.FieldMetaData("o4", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_triggers_for_resourceplan_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_or_drop_wm_trigger_to_pool_mapping_result.class, metaDataMap); } - public get_triggers_for_resourceplan_result() { + public create_or_drop_wm_trigger_to_pool_mapping_result() { } - public get_triggers_for_resourceplan_result( - WMGetTriggersForResourePlanResponse success, - NoSuchObjectException o1, - MetaException o2) + public create_or_drop_wm_trigger_to_pool_mapping_result( + WMCreateOrDropTriggerToPoolMappingResponse success, + AlreadyExistsException o1, + NoSuchObjectException o2, + InvalidObjectException o3, + MetaException o4) { this(); this.success = success; this.o1 = o1; this.o2 = o2; + this.o3 = o3; + this.o4 = o4; } /** * Performs a deep copy on other. 
*/ - public get_triggers_for_resourceplan_result(get_triggers_for_resourceplan_result other) { + public create_or_drop_wm_trigger_to_pool_mapping_result(create_or_drop_wm_trigger_to_pool_mapping_result other) { if (other.isSetSuccess()) { - this.success = new WMGetTriggersForResourePlanResponse(other.success); + this.success = new WMCreateOrDropTriggerToPoolMappingResponse(other.success); } if (other.isSetO1()) { - this.o1 = new NoSuchObjectException(other.o1); + this.o1 = new AlreadyExistsException(other.o1); } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new NoSuchObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new InvalidObjectException(other.o3); + } + if (other.isSetO4()) { + this.o4 = new MetaException(other.o4); } } - public get_triggers_for_resourceplan_result deepCopy() { - return new get_triggers_for_resourceplan_result(this); + public create_or_drop_wm_trigger_to_pool_mapping_result deepCopy() { + return new create_or_drop_wm_trigger_to_pool_mapping_result(this); } @Override @@ -201572,13 +207777,15 @@ public void clear() { this.success = null; this.o1 = null; this.o2 = null; + this.o3 = null; + this.o4 = null; } - public WMGetTriggersForResourePlanResponse getSuccess() { + public WMCreateOrDropTriggerToPoolMappingResponse getSuccess() { return this.success; } - public void setSuccess(WMGetTriggersForResourePlanResponse success) { + public void setSuccess(WMCreateOrDropTriggerToPoolMappingResponse success) { this.success = success; } @@ -201597,11 +207804,11 @@ public void setSuccessIsSet(boolean value) { } } - public NoSuchObjectException getO1() { + public AlreadyExistsException getO1() { return this.o1; } - public void setO1(NoSuchObjectException o1) { + public void setO1(AlreadyExistsException o1) { this.o1 = o1; } @@ -201620,11 +207827,11 @@ public void setO1IsSet(boolean value) { } } - public MetaException getO2() { + public NoSuchObjectException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(NoSuchObjectException o2) { this.o2 = o2; } @@ -201643,13 +207850,59 @@ public void setO2IsSet(boolean value) { } } + public InvalidObjectException getO3() { + return this.o3; + } + + public void setO3(InvalidObjectException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public MetaException getO4() { + return this.o4; + } + + public void setO4(MetaException o4) { + this.o4 = o4; + } + + public void unsetO4() { + this.o4 = null; + } + + /** Returns true if field o4 is set (has been assigned a value) and false otherwise */ + public boolean isSetO4() { + return this.o4 != null; + } + + public void setO4IsSet(boolean value) { + if (!value) { + this.o4 = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case SUCCESS: if (value == null) { unsetSuccess(); } else { - setSuccess((WMGetTriggersForResourePlanResponse)value); + setSuccess((WMCreateOrDropTriggerToPoolMappingResponse)value); } break; @@ -201657,7 +207910,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO1(); } else { - setO1((NoSuchObjectException)value); + setO1((AlreadyExistsException)value); } break; @@ -201665,7 +207918,23 @@ public void setFieldValue(_Fields field, Object value) { if (value == 
null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((NoSuchObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((InvalidObjectException)value); + } + break; + + case O4: + if (value == null) { + unsetO4(); + } else { + setO4((MetaException)value); } break; @@ -201683,6 +207952,12 @@ public Object getFieldValue(_Fields field) { case O2: return getO2(); + case O3: + return getO3(); + + case O4: + return getO4(); + } throw new IllegalStateException(); } @@ -201700,6 +207975,10 @@ public boolean isSet(_Fields field) { return isSetO1(); case O2: return isSetO2(); + case O3: + return isSetO3(); + case O4: + return isSetO4(); } throw new IllegalStateException(); } @@ -201708,12 +207987,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_triggers_for_resourceplan_result) - return this.equals((get_triggers_for_resourceplan_result)that); + if (that instanceof create_or_drop_wm_trigger_to_pool_mapping_result) + return this.equals((create_or_drop_wm_trigger_to_pool_mapping_result)that); return false; } - public boolean equals(get_triggers_for_resourceplan_result that) { + public boolean equals(create_or_drop_wm_trigger_to_pool_mapping_result that) { if (that == null) return false; @@ -201744,6 +208023,24 @@ public boolean equals(get_triggers_for_resourceplan_result that) { return false; } + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + boolean this_present_o4 = true && this.isSetO4(); + boolean that_present_o4 = true && that.isSetO4(); + if (this_present_o4 || that_present_o4) { + if (!(this_present_o4 && that_present_o4)) + return false; + if (!this.o4.equals(that.o4)) + return false; + } + return true; } @@ -201766,11 +208063,21 @@ public int hashCode() { if (present_o2) list.add(o2); + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + + boolean present_o4 = true && (isSetO4()); + list.add(present_o4); + if (present_o4) + list.add(o4); + return list.hashCode(); } @Override - public int compareTo(get_triggers_for_resourceplan_result other) { + public int compareTo(create_or_drop_wm_trigger_to_pool_mapping_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -201807,6 +208114,26 @@ public int compareTo(get_triggers_for_resourceplan_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO4()).compareTo(other.isSetO4()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO4()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o4, other.o4); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -201824,7 +208151,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_triggers_for_resourceplan_result("); + StringBuilder sb = new StringBuilder("create_or_drop_wm_trigger_to_pool_mapping_result("); boolean first = true; sb.append("success:"); @@ -201850,6 +208177,22 @@ public String toString() { sb.append(this.o2); } first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + if (!first) sb.append(", "); + sb.append("o4:"); + if (this.o4 == null) { + sb.append("null"); + } else { + sb.append(this.o4); + } + first = false; sb.append(")"); return sb.toString(); } @@ -201878,15 +208221,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_triggers_for_resourceplan_resultStandardSchemeFactory implements SchemeFactory { - public get_triggers_for_resourceplan_resultStandardScheme getScheme() { - return new get_triggers_for_resourceplan_resultStandardScheme(); + private static class create_or_drop_wm_trigger_to_pool_mapping_resultStandardSchemeFactory implements SchemeFactory { + public create_or_drop_wm_trigger_to_pool_mapping_resultStandardScheme getScheme() { + return new create_or_drop_wm_trigger_to_pool_mapping_resultStandardScheme(); } } - private static class get_triggers_for_resourceplan_resultStandardScheme extends StandardScheme { + private static class create_or_drop_wm_trigger_to_pool_mapping_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_triggers_for_resourceplan_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, create_or_drop_wm_trigger_to_pool_mapping_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -201898,7 +208241,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_triggers_for_re switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new WMGetTriggersForResourePlanResponse(); + struct.success = new WMCreateOrDropTriggerToPoolMappingResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -201907,7 +208250,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_triggers_for_re break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new NoSuchObjectException(); + struct.o1 = new AlreadyExistsException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { @@ -201916,13 +208259,31 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_triggers_for_re break; case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new MetaException(); + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new InvalidObjectException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // O4 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o4 = new MetaException(); + struct.o4.read(iprot); + 
struct.setO4IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -201932,7 +208293,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_triggers_for_re struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_triggers_for_resourceplan_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, create_or_drop_wm_trigger_to_pool_mapping_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -201951,22 +208312,32 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_triggers_for_r struct.o2.write(oprot); oprot.writeFieldEnd(); } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o4 != null) { + oprot.writeFieldBegin(O4_FIELD_DESC); + struct.o4.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_triggers_for_resourceplan_resultTupleSchemeFactory implements SchemeFactory { - public get_triggers_for_resourceplan_resultTupleScheme getScheme() { - return new get_triggers_for_resourceplan_resultTupleScheme(); + private static class create_or_drop_wm_trigger_to_pool_mapping_resultTupleSchemeFactory implements SchemeFactory { + public create_or_drop_wm_trigger_to_pool_mapping_resultTupleScheme getScheme() { + return new create_or_drop_wm_trigger_to_pool_mapping_resultTupleScheme(); } } - private static class get_triggers_for_resourceplan_resultTupleScheme extends TupleScheme { + private static class create_or_drop_wm_trigger_to_pool_mapping_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_triggers_for_resourceplan_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, create_or_drop_wm_trigger_to_pool_mapping_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -201978,7 +208349,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_triggers_for_re if (struct.isSetO2()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetO3()) { + optionals.set(3); + } + if (struct.isSetO4()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetSuccess()) { struct.success.write(oprot); } @@ -201988,27 +208365,43 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_triggers_for_re if (struct.isSetO2()) { struct.o2.write(oprot); } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } + if (struct.isSetO4()) { + struct.o4.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_triggers_for_resourceplan_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, create_or_drop_wm_trigger_to_pool_mapping_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { - struct.success = new WMGetTriggersForResourePlanResponse(); + struct.success = new WMCreateOrDropTriggerToPoolMappingResponse(); 
struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o1 = new NoSuchObjectException(); + struct.o1 = new AlreadyExistsException(); struct.o1.read(iprot); struct.setO1IsSet(true); } if (incoming.get(2)) { - struct.o2 = new MetaException(); + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } + if (incoming.get(3)) { + struct.o3 = new InvalidObjectException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } + if (incoming.get(4)) { + struct.o4 = new MetaException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrDropTriggerToPoolMappingRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrDropTriggerToPoolMappingRequest.java new file mode 100644 index 0000000000..f807be9854 --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrDropTriggerToPoolMappingRequest.java @@ -0,0 +1,708 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMCreateOrDropTriggerToPoolMappingRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMCreateOrDropTriggerToPoolMappingRequest"); + + private static final org.apache.thrift.protocol.TField RESOURCE_PLAN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("resourcePlanName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TRIGGER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("triggerName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField POOL_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("poolPath", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField DROP_FIELD_DESC = new org.apache.thrift.protocol.TField("drop", org.apache.thrift.protocol.TType.BOOL, (short)4); + + private static final Map, SchemeFactory> schemes = new 
HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new WMCreateOrDropTriggerToPoolMappingRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new WMCreateOrDropTriggerToPoolMappingRequestTupleSchemeFactory()); + } + + private String resourcePlanName; // optional + private String triggerName; // optional + private String poolPath; // optional + private boolean drop; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RESOURCE_PLAN_NAME((short)1, "resourcePlanName"), + TRIGGER_NAME((short)2, "triggerName"), + POOL_PATH((short)3, "poolPath"), + DROP((short)4, "drop"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RESOURCE_PLAN_NAME + return RESOURCE_PLAN_NAME; + case 2: // TRIGGER_NAME + return TRIGGER_NAME; + case 3: // POOL_PATH + return POOL_PATH; + case 4: // DROP + return DROP; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __DROP_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.RESOURCE_PLAN_NAME,_Fields.TRIGGER_NAME,_Fields.POOL_PATH,_Fields.DROP}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RESOURCE_PLAN_NAME, new org.apache.thrift.meta_data.FieldMetaData("resourcePlanName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TRIGGER_NAME, new org.apache.thrift.meta_data.FieldMetaData("triggerName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.POOL_PATH, new org.apache.thrift.meta_data.FieldMetaData("poolPath", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.DROP, new org.apache.thrift.meta_data.FieldMetaData("drop", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMCreateOrDropTriggerToPoolMappingRequest.class, metaDataMap); + } + + public WMCreateOrDropTriggerToPoolMappingRequest() { + } + + /** + * Performs a deep copy on other. + */ + public WMCreateOrDropTriggerToPoolMappingRequest(WMCreateOrDropTriggerToPoolMappingRequest other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetResourcePlanName()) { + this.resourcePlanName = other.resourcePlanName; + } + if (other.isSetTriggerName()) { + this.triggerName = other.triggerName; + } + if (other.isSetPoolPath()) { + this.poolPath = other.poolPath; + } + this.drop = other.drop; + } + + public WMCreateOrDropTriggerToPoolMappingRequest deepCopy() { + return new WMCreateOrDropTriggerToPoolMappingRequest(this); + } + + @Override + public void clear() { + this.resourcePlanName = null; + this.triggerName = null; + this.poolPath = null; + setDropIsSet(false); + this.drop = false; + } + + public String getResourcePlanName() { + return this.resourcePlanName; + } + + public void setResourcePlanName(String resourcePlanName) { + this.resourcePlanName = resourcePlanName; + } + + public void unsetResourcePlanName() { + this.resourcePlanName = null; + } + + /** Returns true if field resourcePlanName is set (has been assigned a value) and false otherwise */ + public boolean isSetResourcePlanName() { + return this.resourcePlanName != null; + } + + public void setResourcePlanNameIsSet(boolean value) { + if (!value) { + this.resourcePlanName = null; + } + } + + public String getTriggerName() { + return this.triggerName; + } + + public void setTriggerName(String triggerName) { + this.triggerName = triggerName; + } + + public void unsetTriggerName() { + this.triggerName = null; + } + + /** Returns true if field triggerName is set (has been assigned a value) and false otherwise */ + public boolean isSetTriggerName() { + return this.triggerName != null; + } + + public void setTriggerNameIsSet(boolean value) { + if (!value) { + this.triggerName = null; + } + } + + public String getPoolPath() { + return this.poolPath; + } + + public void setPoolPath(String poolPath) { + this.poolPath = poolPath; + } + + public void unsetPoolPath() { + this.poolPath = null; + } + + /** Returns true if field poolPath is set (has been assigned a value) and false otherwise */ + public boolean isSetPoolPath() { + return this.poolPath != null; + } + + public void setPoolPathIsSet(boolean value) { + if (!value) { + this.poolPath = null; + } + } + + public boolean isDrop() { + return this.drop; + } + + public void setDrop(boolean drop) { + this.drop = drop; + setDropIsSet(true); + } + + public void unsetDrop() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DROP_ISSET_ID); + } + + /** Returns true if field drop is set (has been assigned a value) and false otherwise */ + public boolean isSetDrop() { + return EncodingUtils.testBit(__isset_bitfield, __DROP_ISSET_ID); + } + + public void setDropIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DROP_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RESOURCE_PLAN_NAME: + if (value == null) { + unsetResourcePlanName(); + } else { + setResourcePlanName((String)value); + } + break; + + case TRIGGER_NAME: + if (value == null) { + unsetTriggerName(); + } else { + 
setTriggerName((String)value); + } + break; + + case POOL_PATH: + if (value == null) { + unsetPoolPath(); + } else { + setPoolPath((String)value); + } + break; + + case DROP: + if (value == null) { + unsetDrop(); + } else { + setDrop((Boolean)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RESOURCE_PLAN_NAME: + return getResourcePlanName(); + + case TRIGGER_NAME: + return getTriggerName(); + + case POOL_PATH: + return getPoolPath(); + + case DROP: + return isDrop(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case RESOURCE_PLAN_NAME: + return isSetResourcePlanName(); + case TRIGGER_NAME: + return isSetTriggerName(); + case POOL_PATH: + return isSetPoolPath(); + case DROP: + return isSetDrop(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof WMCreateOrDropTriggerToPoolMappingRequest) + return this.equals((WMCreateOrDropTriggerToPoolMappingRequest)that); + return false; + } + + public boolean equals(WMCreateOrDropTriggerToPoolMappingRequest that) { + if (that == null) + return false; + + boolean this_present_resourcePlanName = true && this.isSetResourcePlanName(); + boolean that_present_resourcePlanName = true && that.isSetResourcePlanName(); + if (this_present_resourcePlanName || that_present_resourcePlanName) { + if (!(this_present_resourcePlanName && that_present_resourcePlanName)) + return false; + if (!this.resourcePlanName.equals(that.resourcePlanName)) + return false; + } + + boolean this_present_triggerName = true && this.isSetTriggerName(); + boolean that_present_triggerName = true && that.isSetTriggerName(); + if (this_present_triggerName || that_present_triggerName) { + if (!(this_present_triggerName && that_present_triggerName)) + return false; + if (!this.triggerName.equals(that.triggerName)) + return false; + } + + boolean this_present_poolPath = true && this.isSetPoolPath(); + boolean that_present_poolPath = true && that.isSetPoolPath(); + if (this_present_poolPath || that_present_poolPath) { + if (!(this_present_poolPath && that_present_poolPath)) + return false; + if (!this.poolPath.equals(that.poolPath)) + return false; + } + + boolean this_present_drop = true && this.isSetDrop(); + boolean that_present_drop = true && that.isSetDrop(); + if (this_present_drop || that_present_drop) { + if (!(this_present_drop && that_present_drop)) + return false; + if (this.drop != that.drop) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_resourcePlanName = true && (isSetResourcePlanName()); + list.add(present_resourcePlanName); + if (present_resourcePlanName) + list.add(resourcePlanName); + + boolean present_triggerName = true && (isSetTriggerName()); + list.add(present_triggerName); + if (present_triggerName) + list.add(triggerName); + + boolean present_poolPath = true && (isSetPoolPath()); + list.add(present_poolPath); + if (present_poolPath) + list.add(poolPath); + + boolean present_drop = true && (isSetDrop()); + list.add(present_drop); + if (present_drop) + list.add(drop); + + return list.hashCode(); + } + + @Override + public int compareTo(WMCreateOrDropTriggerToPoolMappingRequest other) { + if 
(!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetResourcePlanName()).compareTo(other.isSetResourcePlanName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetResourcePlanName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.resourcePlanName, other.resourcePlanName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTriggerName()).compareTo(other.isSetTriggerName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTriggerName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.triggerName, other.triggerName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPoolPath()).compareTo(other.isSetPoolPath()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPoolPath()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.poolPath, other.poolPath); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDrop()).compareTo(other.isSetDrop()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDrop()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.drop, other.drop); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("WMCreateOrDropTriggerToPoolMappingRequest("); + boolean first = true; + + if (isSetResourcePlanName()) { + sb.append("resourcePlanName:"); + if (this.resourcePlanName == null) { + sb.append("null"); + } else { + sb.append(this.resourcePlanName); + } + first = false; + } + if (isSetTriggerName()) { + if (!first) sb.append(", "); + sb.append("triggerName:"); + if (this.triggerName == null) { + sb.append("null"); + } else { + sb.append(this.triggerName); + } + first = false; + } + if (isSetPoolPath()) { + if (!first) sb.append(", "); + sb.append("poolPath:"); + if (this.poolPath == null) { + sb.append("null"); + } else { + sb.append(this.poolPath); + } + first = false; + } + if (isSetDrop()) { + if (!first) sb.append(", "); + sb.append("drop:"); + sb.append(this.drop); + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class WMCreateOrDropTriggerToPoolMappingRequestStandardSchemeFactory implements SchemeFactory { + public WMCreateOrDropTriggerToPoolMappingRequestStandardScheme getScheme() { + return new WMCreateOrDropTriggerToPoolMappingRequestStandardScheme(); + } + } + + private static class WMCreateOrDropTriggerToPoolMappingRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, WMCreateOrDropTriggerToPoolMappingRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RESOURCE_PLAN_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.resourcePlanName = iprot.readString(); + struct.setResourcePlanNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TRIGGER_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.triggerName = iprot.readString(); + struct.setTriggerNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // POOL_PATH + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.poolPath = iprot.readString(); + struct.setPoolPathIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // DROP + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.drop = iprot.readBool(); + struct.setDropIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, WMCreateOrDropTriggerToPoolMappingRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.resourcePlanName != null) { + if (struct.isSetResourcePlanName()) { + oprot.writeFieldBegin(RESOURCE_PLAN_NAME_FIELD_DESC); + oprot.writeString(struct.resourcePlanName); + oprot.writeFieldEnd(); + } + } + if (struct.triggerName != null) { + if (struct.isSetTriggerName()) { + oprot.writeFieldBegin(TRIGGER_NAME_FIELD_DESC); + oprot.writeString(struct.triggerName); + oprot.writeFieldEnd(); + } + } + if (struct.poolPath != null) { + if (struct.isSetPoolPath()) { + oprot.writeFieldBegin(POOL_PATH_FIELD_DESC); + oprot.writeString(struct.poolPath); + oprot.writeFieldEnd(); + } + } + if (struct.isSetDrop()) { + oprot.writeFieldBegin(DROP_FIELD_DESC); + oprot.writeBool(struct.drop); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class WMCreateOrDropTriggerToPoolMappingRequestTupleSchemeFactory implements SchemeFactory { + public WMCreateOrDropTriggerToPoolMappingRequestTupleScheme getScheme() { + return new WMCreateOrDropTriggerToPoolMappingRequestTupleScheme(); + } + } + + private static class 
WMCreateOrDropTriggerToPoolMappingRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, WMCreateOrDropTriggerToPoolMappingRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetResourcePlanName()) { + optionals.set(0); + } + if (struct.isSetTriggerName()) { + optionals.set(1); + } + if (struct.isSetPoolPath()) { + optionals.set(2); + } + if (struct.isSetDrop()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); + if (struct.isSetResourcePlanName()) { + oprot.writeString(struct.resourcePlanName); + } + if (struct.isSetTriggerName()) { + oprot.writeString(struct.triggerName); + } + if (struct.isSetPoolPath()) { + oprot.writeString(struct.poolPath); + } + if (struct.isSetDrop()) { + oprot.writeBool(struct.drop); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, WMCreateOrDropTriggerToPoolMappingRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(4); + if (incoming.get(0)) { + struct.resourcePlanName = iprot.readString(); + struct.setResourcePlanNameIsSet(true); + } + if (incoming.get(1)) { + struct.triggerName = iprot.readString(); + struct.setTriggerNameIsSet(true); + } + if (incoming.get(2)) { + struct.poolPath = iprot.readString(); + struct.setPoolPathIsSet(true); + } + if (incoming.get(3)) { + struct.drop = iprot.readBool(); + struct.setDropIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrDropTriggerToPoolMappingResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrDropTriggerToPoolMappingResponse.java new file mode 100644 index 0000000000..33b28cd04f --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrDropTriggerToPoolMappingResponse.java @@ -0,0 +1,283 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMCreateOrDropTriggerToPoolMappingResponse implements org.apache.thrift.TBase, java.io.Serializable, 
Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMCreateOrDropTriggerToPoolMappingResponse"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new WMCreateOrDropTriggerToPoolMappingResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new WMCreateOrDropTriggerToPoolMappingResponseTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMCreateOrDropTriggerToPoolMappingResponse.class, metaDataMap); + } + + public WMCreateOrDropTriggerToPoolMappingResponse() { + } + + /** + * Performs a deep copy on other. 
+ */ + public WMCreateOrDropTriggerToPoolMappingResponse(WMCreateOrDropTriggerToPoolMappingResponse other) { + } + + public WMCreateOrDropTriggerToPoolMappingResponse deepCopy() { + return new WMCreateOrDropTriggerToPoolMappingResponse(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof WMCreateOrDropTriggerToPoolMappingResponse) + return this.equals((WMCreateOrDropTriggerToPoolMappingResponse)that); + return false; + } + + public boolean equals(WMCreateOrDropTriggerToPoolMappingResponse that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(WMCreateOrDropTriggerToPoolMappingResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("WMCreateOrDropTriggerToPoolMappingResponse("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class WMCreateOrDropTriggerToPoolMappingResponseStandardSchemeFactory implements SchemeFactory { + public WMCreateOrDropTriggerToPoolMappingResponseStandardScheme getScheme() { + return new WMCreateOrDropTriggerToPoolMappingResponseStandardScheme(); + } + } + + private static class WMCreateOrDropTriggerToPoolMappingResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, WMCreateOrDropTriggerToPoolMappingResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + 
schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, WMCreateOrDropTriggerToPoolMappingResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class WMCreateOrDropTriggerToPoolMappingResponseTupleSchemeFactory implements SchemeFactory { + public WMCreateOrDropTriggerToPoolMappingResponseTupleScheme getScheme() { + return new WMCreateOrDropTriggerToPoolMappingResponseTupleScheme(); + } + } + + private static class WMCreateOrDropTriggerToPoolMappingResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, WMCreateOrDropTriggerToPoolMappingResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, WMCreateOrDropTriggerToPoolMappingResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdateMappingRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdateMappingRequest.java new file mode 100644 index 0000000000..2fbe10eb26 --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdateMappingRequest.java @@ -0,0 +1,501 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMCreateOrUpdateMappingRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMCreateOrUpdateMappingRequest"); + + private static final org.apache.thrift.protocol.TField 
MAPPING_FIELD_DESC = new org.apache.thrift.protocol.TField("mapping", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField UPDATE_FIELD_DESC = new org.apache.thrift.protocol.TField("update", org.apache.thrift.protocol.TType.BOOL, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new WMCreateOrUpdateMappingRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new WMCreateOrUpdateMappingRequestTupleSchemeFactory()); + } + + private WMMapping mapping; // optional + private boolean update; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + MAPPING((short)1, "mapping"), + UPDATE((short)2, "update"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // MAPPING + return MAPPING; + case 2: // UPDATE + return UPDATE; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __UPDATE_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.MAPPING,_Fields.UPDATE}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.MAPPING, new org.apache.thrift.meta_data.FieldMetaData("mapping", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMMapping.class))); + tmpMap.put(_Fields.UPDATE, new org.apache.thrift.meta_data.FieldMetaData("update", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMCreateOrUpdateMappingRequest.class, metaDataMap); + } + + public WMCreateOrUpdateMappingRequest() { + } + + /** + * Performs a deep copy on other. 
+ */ + public WMCreateOrUpdateMappingRequest(WMCreateOrUpdateMappingRequest other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetMapping()) { + this.mapping = new WMMapping(other.mapping); + } + this.update = other.update; + } + + public WMCreateOrUpdateMappingRequest deepCopy() { + return new WMCreateOrUpdateMappingRequest(this); + } + + @Override + public void clear() { + this.mapping = null; + setUpdateIsSet(false); + this.update = false; + } + + public WMMapping getMapping() { + return this.mapping; + } + + public void setMapping(WMMapping mapping) { + this.mapping = mapping; + } + + public void unsetMapping() { + this.mapping = null; + } + + /** Returns true if field mapping is set (has been assigned a value) and false otherwise */ + public boolean isSetMapping() { + return this.mapping != null; + } + + public void setMappingIsSet(boolean value) { + if (!value) { + this.mapping = null; + } + } + + public boolean isUpdate() { + return this.update; + } + + public void setUpdate(boolean update) { + this.update = update; + setUpdateIsSet(true); + } + + public void unsetUpdate() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UPDATE_ISSET_ID); + } + + /** Returns true if field update is set (has been assigned a value) and false otherwise */ + public boolean isSetUpdate() { + return EncodingUtils.testBit(__isset_bitfield, __UPDATE_ISSET_ID); + } + + public void setUpdateIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UPDATE_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case MAPPING: + if (value == null) { + unsetMapping(); + } else { + setMapping((WMMapping)value); + } + break; + + case UPDATE: + if (value == null) { + unsetUpdate(); + } else { + setUpdate((Boolean)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case MAPPING: + return getMapping(); + + case UPDATE: + return isUpdate(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case MAPPING: + return isSetMapping(); + case UPDATE: + return isSetUpdate(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof WMCreateOrUpdateMappingRequest) + return this.equals((WMCreateOrUpdateMappingRequest)that); + return false; + } + + public boolean equals(WMCreateOrUpdateMappingRequest that) { + if (that == null) + return false; + + boolean this_present_mapping = true && this.isSetMapping(); + boolean that_present_mapping = true && that.isSetMapping(); + if (this_present_mapping || that_present_mapping) { + if (!(this_present_mapping && that_present_mapping)) + return false; + if (!this.mapping.equals(that.mapping)) + return false; + } + + boolean this_present_update = true && this.isSetUpdate(); + boolean that_present_update = true && that.isSetUpdate(); + if (this_present_update || that_present_update) { + if (!(this_present_update && that_present_update)) + return false; + if (this.update != that.update) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_mapping = true && (isSetMapping()); + list.add(present_mapping); + if (present_mapping) + 
list.add(mapping); + + boolean present_update = true && (isSetUpdate()); + list.add(present_update); + if (present_update) + list.add(update); + + return list.hashCode(); + } + + @Override + public int compareTo(WMCreateOrUpdateMappingRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetMapping()).compareTo(other.isSetMapping()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMapping()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mapping, other.mapping); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetUpdate()).compareTo(other.isSetUpdate()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetUpdate()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.update, other.update); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("WMCreateOrUpdateMappingRequest("); + boolean first = true; + + if (isSetMapping()) { + sb.append("mapping:"); + if (this.mapping == null) { + sb.append("null"); + } else { + sb.append(this.mapping); + } + first = false; + } + if (isSetUpdate()) { + if (!first) sb.append(", "); + sb.append("update:"); + sb.append(this.update); + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (mapping != null) { + mapping.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class WMCreateOrUpdateMappingRequestStandardSchemeFactory implements SchemeFactory { + public WMCreateOrUpdateMappingRequestStandardScheme getScheme() { + return new WMCreateOrUpdateMappingRequestStandardScheme(); + } + } + + private static class WMCreateOrUpdateMappingRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, WMCreateOrUpdateMappingRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // MAPPING + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.mapping = new WMMapping(); + struct.mapping.read(iprot); + struct.setMappingIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // UPDATE + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.update = iprot.readBool(); + struct.setUpdateIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, WMCreateOrUpdateMappingRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.mapping != null) { + if (struct.isSetMapping()) { + oprot.writeFieldBegin(MAPPING_FIELD_DESC); + struct.mapping.write(oprot); + oprot.writeFieldEnd(); + } + } + if (struct.isSetUpdate()) { + oprot.writeFieldBegin(UPDATE_FIELD_DESC); + oprot.writeBool(struct.update); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class WMCreateOrUpdateMappingRequestTupleSchemeFactory implements SchemeFactory { + public WMCreateOrUpdateMappingRequestTupleScheme getScheme() { + return new WMCreateOrUpdateMappingRequestTupleScheme(); + } + } + + private static class WMCreateOrUpdateMappingRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, WMCreateOrUpdateMappingRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetMapping()) { + optionals.set(0); + } + if (struct.isSetUpdate()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetMapping()) { + struct.mapping.write(oprot); + } + if (struct.isSetUpdate()) { + oprot.writeBool(struct.update); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, WMCreateOrUpdateMappingRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.mapping = new WMMapping(); + struct.mapping.read(iprot); + struct.setMappingIsSet(true); + } + if (incoming.get(1)) { + struct.update = iprot.readBool(); + struct.setUpdateIsSet(true); + } + } + } + +} + diff 
--git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdateMappingResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdateMappingResponse.java new file mode 100644 index 0000000000..5b8041f7b9 --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdateMappingResponse.java @@ -0,0 +1,283 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMCreateOrUpdateMappingResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMCreateOrUpdateMappingResponse"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new WMCreateOrUpdateMappingResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new WMCreateOrUpdateMappingResponseTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMCreateOrUpdateMappingResponse.class, metaDataMap); + } + + public WMCreateOrUpdateMappingResponse() { + } + + /** + * Performs a deep copy on other. + */ + public WMCreateOrUpdateMappingResponse(WMCreateOrUpdateMappingResponse other) { + } + + public WMCreateOrUpdateMappingResponse deepCopy() { + return new WMCreateOrUpdateMappingResponse(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof WMCreateOrUpdateMappingResponse) + return this.equals((WMCreateOrUpdateMappingResponse)that); + return false; + } + + public boolean equals(WMCreateOrUpdateMappingResponse that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(WMCreateOrUpdateMappingResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("WMCreateOrUpdateMappingResponse("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new 
org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class WMCreateOrUpdateMappingResponseStandardSchemeFactory implements SchemeFactory { + public WMCreateOrUpdateMappingResponseStandardScheme getScheme() { + return new WMCreateOrUpdateMappingResponseStandardScheme(); + } + } + + private static class WMCreateOrUpdateMappingResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, WMCreateOrUpdateMappingResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, WMCreateOrUpdateMappingResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class WMCreateOrUpdateMappingResponseTupleSchemeFactory implements SchemeFactory { + public WMCreateOrUpdateMappingResponseTupleScheme getScheme() { + return new WMCreateOrUpdateMappingResponseTupleScheme(); + } + } + + private static class WMCreateOrUpdateMappingResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, WMCreateOrUpdateMappingResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, WMCreateOrUpdateMappingResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdatePoolRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdatePoolRequest.java new file mode 100644 index 0000000000..9e96c0a71e --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdatePoolRequest.java @@ -0,0 +1,607 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; 
+import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMCreateOrUpdatePoolRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMCreateOrUpdatePoolRequest"); + + private static final org.apache.thrift.protocol.TField POOL_FIELD_DESC = new org.apache.thrift.protocol.TField("pool", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField NEW_POOL_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("newPoolPath", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField UPDATE_FIELD_DESC = new org.apache.thrift.protocol.TField("update", org.apache.thrift.protocol.TType.BOOL, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new WMCreateOrUpdatePoolRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new WMCreateOrUpdatePoolRequestTupleSchemeFactory()); + } + + private WMPool pool; // optional + private String newPoolPath; // optional + private boolean update; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + POOL((short)1, "pool"), + NEW_POOL_PATH((short)2, "newPoolPath"), + UPDATE((short)3, "update"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // POOL + return POOL; + case 2: // NEW_POOL_PATH + return NEW_POOL_PATH; + case 3: // UPDATE + return UPDATE; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __UPDATE_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.POOL,_Fields.NEW_POOL_PATH,_Fields.UPDATE}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.POOL, new org.apache.thrift.meta_data.FieldMetaData("pool", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMPool.class))); + tmpMap.put(_Fields.NEW_POOL_PATH, new org.apache.thrift.meta_data.FieldMetaData("newPoolPath", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.UPDATE, new org.apache.thrift.meta_data.FieldMetaData("update", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMCreateOrUpdatePoolRequest.class, metaDataMap); + } + + public WMCreateOrUpdatePoolRequest() { + } + + /** + * Performs a deep copy on other. 
+ */ + public WMCreateOrUpdatePoolRequest(WMCreateOrUpdatePoolRequest other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetPool()) { + this.pool = new WMPool(other.pool); + } + if (other.isSetNewPoolPath()) { + this.newPoolPath = other.newPoolPath; + } + this.update = other.update; + } + + public WMCreateOrUpdatePoolRequest deepCopy() { + return new WMCreateOrUpdatePoolRequest(this); + } + + @Override + public void clear() { + this.pool = null; + this.newPoolPath = null; + setUpdateIsSet(false); + this.update = false; + } + + public WMPool getPool() { + return this.pool; + } + + public void setPool(WMPool pool) { + this.pool = pool; + } + + public void unsetPool() { + this.pool = null; + } + + /** Returns true if field pool is set (has been assigned a value) and false otherwise */ + public boolean isSetPool() { + return this.pool != null; + } + + public void setPoolIsSet(boolean value) { + if (!value) { + this.pool = null; + } + } + + public String getNewPoolPath() { + return this.newPoolPath; + } + + public void setNewPoolPath(String newPoolPath) { + this.newPoolPath = newPoolPath; + } + + public void unsetNewPoolPath() { + this.newPoolPath = null; + } + + /** Returns true if field newPoolPath is set (has been assigned a value) and false otherwise */ + public boolean isSetNewPoolPath() { + return this.newPoolPath != null; + } + + public void setNewPoolPathIsSet(boolean value) { + if (!value) { + this.newPoolPath = null; + } + } + + public boolean isUpdate() { + return this.update; + } + + public void setUpdate(boolean update) { + this.update = update; + setUpdateIsSet(true); + } + + public void unsetUpdate() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UPDATE_ISSET_ID); + } + + /** Returns true if field update is set (has been assigned a value) and false otherwise */ + public boolean isSetUpdate() { + return EncodingUtils.testBit(__isset_bitfield, __UPDATE_ISSET_ID); + } + + public void setUpdateIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UPDATE_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case POOL: + if (value == null) { + unsetPool(); + } else { + setPool((WMPool)value); + } + break; + + case NEW_POOL_PATH: + if (value == null) { + unsetNewPoolPath(); + } else { + setNewPoolPath((String)value); + } + break; + + case UPDATE: + if (value == null) { + unsetUpdate(); + } else { + setUpdate((Boolean)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case POOL: + return getPool(); + + case NEW_POOL_PATH: + return getNewPoolPath(); + + case UPDATE: + return isUpdate(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case POOL: + return isSetPool(); + case NEW_POOL_PATH: + return isSetNewPoolPath(); + case UPDATE: + return isSetUpdate(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof WMCreateOrUpdatePoolRequest) + return this.equals((WMCreateOrUpdatePoolRequest)that); + return false; + } + + public boolean equals(WMCreateOrUpdatePoolRequest that) { + if (that == null) + return false; + + boolean this_present_pool = true && this.isSetPool(); + boolean 
that_present_pool = true && that.isSetPool(); + if (this_present_pool || that_present_pool) { + if (!(this_present_pool && that_present_pool)) + return false; + if (!this.pool.equals(that.pool)) + return false; + } + + boolean this_present_newPoolPath = true && this.isSetNewPoolPath(); + boolean that_present_newPoolPath = true && that.isSetNewPoolPath(); + if (this_present_newPoolPath || that_present_newPoolPath) { + if (!(this_present_newPoolPath && that_present_newPoolPath)) + return false; + if (!this.newPoolPath.equals(that.newPoolPath)) + return false; + } + + boolean this_present_update = true && this.isSetUpdate(); + boolean that_present_update = true && that.isSetUpdate(); + if (this_present_update || that_present_update) { + if (!(this_present_update && that_present_update)) + return false; + if (this.update != that.update) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_pool = true && (isSetPool()); + list.add(present_pool); + if (present_pool) + list.add(pool); + + boolean present_newPoolPath = true && (isSetNewPoolPath()); + list.add(present_newPoolPath); + if (present_newPoolPath) + list.add(newPoolPath); + + boolean present_update = true && (isSetUpdate()); + list.add(present_update); + if (present_update) + list.add(update); + + return list.hashCode(); + } + + @Override + public int compareTo(WMCreateOrUpdatePoolRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetPool()).compareTo(other.isSetPool()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPool()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pool, other.pool); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetNewPoolPath()).compareTo(other.isSetNewPoolPath()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNewPoolPath()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.newPoolPath, other.newPoolPath); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetUpdate()).compareTo(other.isSetUpdate()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetUpdate()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.update, other.update); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("WMCreateOrUpdatePoolRequest("); + boolean first = true; + + if (isSetPool()) { + sb.append("pool:"); + if (this.pool == null) { + sb.append("null"); + } else { + sb.append(this.pool); + } + first = false; + } + if (isSetNewPoolPath()) { + if (!first) sb.append(", "); + sb.append("newPoolPath:"); + if (this.newPoolPath == null) { + sb.append("null"); + } else { + sb.append(this.newPoolPath); + } + first = false; + } + if (isSetUpdate()) { + if (!first) sb.append(", 
"); + sb.append("update:"); + sb.append(this.update); + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (pool != null) { + pool.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class WMCreateOrUpdatePoolRequestStandardSchemeFactory implements SchemeFactory { + public WMCreateOrUpdatePoolRequestStandardScheme getScheme() { + return new WMCreateOrUpdatePoolRequestStandardScheme(); + } + } + + private static class WMCreateOrUpdatePoolRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, WMCreateOrUpdatePoolRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // POOL + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.pool = new WMPool(); + struct.pool.read(iprot); + struct.setPoolIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // NEW_POOL_PATH + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.newPoolPath = iprot.readString(); + struct.setNewPoolPathIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // UPDATE + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.update = iprot.readBool(); + struct.setUpdateIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, WMCreateOrUpdatePoolRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.pool != null) { + if (struct.isSetPool()) { + oprot.writeFieldBegin(POOL_FIELD_DESC); + struct.pool.write(oprot); + oprot.writeFieldEnd(); + } + } + if (struct.newPoolPath != null) { + if (struct.isSetNewPoolPath()) { + oprot.writeFieldBegin(NEW_POOL_PATH_FIELD_DESC); + oprot.writeString(struct.newPoolPath); + oprot.writeFieldEnd(); + } + } + if (struct.isSetUpdate()) { + oprot.writeFieldBegin(UPDATE_FIELD_DESC); + oprot.writeBool(struct.update); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class 
WMCreateOrUpdatePoolRequestTupleSchemeFactory implements SchemeFactory { + public WMCreateOrUpdatePoolRequestTupleScheme getScheme() { + return new WMCreateOrUpdatePoolRequestTupleScheme(); + } + } + + private static class WMCreateOrUpdatePoolRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, WMCreateOrUpdatePoolRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetPool()) { + optionals.set(0); + } + if (struct.isSetNewPoolPath()) { + optionals.set(1); + } + if (struct.isSetUpdate()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetPool()) { + struct.pool.write(oprot); + } + if (struct.isSetNewPoolPath()) { + oprot.writeString(struct.newPoolPath); + } + if (struct.isSetUpdate()) { + oprot.writeBool(struct.update); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, WMCreateOrUpdatePoolRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.pool = new WMPool(); + struct.pool.read(iprot); + struct.setPoolIsSet(true); + } + if (incoming.get(1)) { + struct.newPoolPath = iprot.readString(); + struct.setNewPoolPathIsSet(true); + } + if (incoming.get(2)) { + struct.update = iprot.readBool(); + struct.setUpdateIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdatePoolResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdatePoolResponse.java new file mode 100644 index 0000000000..a8712165bf --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMCreateOrUpdatePoolResponse.java @@ -0,0 +1,283 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMCreateOrUpdatePoolResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("WMCreateOrUpdatePoolResponse"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new WMCreateOrUpdatePoolResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new WMCreateOrUpdatePoolResponseTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMCreateOrUpdatePoolResponse.class, metaDataMap); + } + + public WMCreateOrUpdatePoolResponse() { + } + + /** + * Performs a deep copy on other. 
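[Reviewer note: illustrative only, not part of the generated patch.] WMCreateOrUpdatePoolResponse carries no fields, so its _Fields enum above has no constants; it exists only so the struct satisfies the common TBase plumbing. For structs that do carry fields, the same enum doubles as a reflective accessor, for example on the request struct defined earlier in this patch:

    import org.apache.hadoop.hive.metastore.api.WMCreateOrUpdatePoolRequest;

    public class FieldsSketch {
      public static void main(String[] args) {
        WMCreateOrUpdatePoolRequest request = new WMCreateOrUpdatePoolRequest();

        // Write a field through its _Fields constant rather than the typed setter.
        request.setFieldValue(WMCreateOrUpdatePoolRequest._Fields.NEW_POOL_PATH, "default");

        // Resolve the constant by its thrift id (2 == newPoolPath) and read the value back.
        WMCreateOrUpdatePoolRequest._Fields f =
            WMCreateOrUpdatePoolRequest._Fields.findByThriftId(2);
        System.out.println(request.isSet(f));         // true
        System.out.println(request.getFieldValue(f)); // default
      }
    }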
+ */ + public WMCreateOrUpdatePoolResponse(WMCreateOrUpdatePoolResponse other) { + } + + public WMCreateOrUpdatePoolResponse deepCopy() { + return new WMCreateOrUpdatePoolResponse(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof WMCreateOrUpdatePoolResponse) + return this.equals((WMCreateOrUpdatePoolResponse)that); + return false; + } + + public boolean equals(WMCreateOrUpdatePoolResponse that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(WMCreateOrUpdatePoolResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("WMCreateOrUpdatePoolResponse("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class WMCreateOrUpdatePoolResponseStandardSchemeFactory implements SchemeFactory { + public WMCreateOrUpdatePoolResponseStandardScheme getScheme() { + return new WMCreateOrUpdatePoolResponseStandardScheme(); + } + } + + private static class WMCreateOrUpdatePoolResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, WMCreateOrUpdatePoolResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, WMCreateOrUpdatePoolResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class WMCreateOrUpdatePoolResponseTupleSchemeFactory implements SchemeFactory { + public WMCreateOrUpdatePoolResponseTupleScheme getScheme() { + return new WMCreateOrUpdatePoolResponseTupleScheme(); + } + } + + private static class WMCreateOrUpdatePoolResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, WMCreateOrUpdatePoolResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, WMCreateOrUpdatePoolResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropMappingRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropMappingRequest.java new file mode 100644 index 0000000000..4fdf4029dd --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropMappingRequest.java @@ -0,0 +1,398 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMDropMappingRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMDropMappingRequest"); + + private static final org.apache.thrift.protocol.TField MAPPING_FIELD_DESC = new org.apache.thrift.protocol.TField("mapping", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new WMDropMappingRequestStandardSchemeFactory()); + 
schemes.put(TupleScheme.class, new WMDropMappingRequestTupleSchemeFactory()); + } + + private WMMapping mapping; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + MAPPING((short)1, "mapping"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // MAPPING + return MAPPING; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final _Fields optionals[] = {_Fields.MAPPING}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.MAPPING, new org.apache.thrift.meta_data.FieldMetaData("mapping", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMMapping.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMDropMappingRequest.class, metaDataMap); + } + + public WMDropMappingRequest() { + } + + /** + * Performs a deep copy on other. 
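[Reviewer note: illustrative only, not part of the generated patch.] The copy constructor just below clones the nested WMMapping rather than aliasing it, so deepCopy() yields a request that can be mutated independently of the original. A small sketch, using an empty WMMapping so no required fields come into play:

    import org.apache.hadoop.hive.metastore.api.WMDropMappingRequest;
    import org.apache.hadoop.hive.metastore.api.WMMapping;

    public class DeepCopySketch {
      public static void main(String[] args) {
        WMDropMappingRequest original = new WMDropMappingRequest();
        original.setMapping(new WMMapping());

        WMDropMappingRequest copy = original.deepCopy();

        // Equal in value, but the nested mapping is a distinct instance.
        System.out.println(original.equals(copy));                      // true
        System.out.println(original.getMapping() == copy.getMapping()); // false
      }
    }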
+ */ + public WMDropMappingRequest(WMDropMappingRequest other) { + if (other.isSetMapping()) { + this.mapping = new WMMapping(other.mapping); + } + } + + public WMDropMappingRequest deepCopy() { + return new WMDropMappingRequest(this); + } + + @Override + public void clear() { + this.mapping = null; + } + + public WMMapping getMapping() { + return this.mapping; + } + + public void setMapping(WMMapping mapping) { + this.mapping = mapping; + } + + public void unsetMapping() { + this.mapping = null; + } + + /** Returns true if field mapping is set (has been assigned a value) and false otherwise */ + public boolean isSetMapping() { + return this.mapping != null; + } + + public void setMappingIsSet(boolean value) { + if (!value) { + this.mapping = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case MAPPING: + if (value == null) { + unsetMapping(); + } else { + setMapping((WMMapping)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case MAPPING: + return getMapping(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case MAPPING: + return isSetMapping(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof WMDropMappingRequest) + return this.equals((WMDropMappingRequest)that); + return false; + } + + public boolean equals(WMDropMappingRequest that) { + if (that == null) + return false; + + boolean this_present_mapping = true && this.isSetMapping(); + boolean that_present_mapping = true && that.isSetMapping(); + if (this_present_mapping || that_present_mapping) { + if (!(this_present_mapping && that_present_mapping)) + return false; + if (!this.mapping.equals(that.mapping)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_mapping = true && (isSetMapping()); + list.add(present_mapping); + if (present_mapping) + list.add(mapping); + + return list.hashCode(); + } + + @Override + public int compareTo(WMDropMappingRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetMapping()).compareTo(other.isSetMapping()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMapping()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mapping, other.mapping); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("WMDropMappingRequest("); + boolean first = true; + + if (isSetMapping()) { + sb.append("mapping:"); + if (this.mapping == null) { + sb.append("null"); + } else { + 
sb.append(this.mapping); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (mapping != null) { + mapping.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class WMDropMappingRequestStandardSchemeFactory implements SchemeFactory { + public WMDropMappingRequestStandardScheme getScheme() { + return new WMDropMappingRequestStandardScheme(); + } + } + + private static class WMDropMappingRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, WMDropMappingRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // MAPPING + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.mapping = new WMMapping(); + struct.mapping.read(iprot); + struct.setMappingIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, WMDropMappingRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.mapping != null) { + if (struct.isSetMapping()) { + oprot.writeFieldBegin(MAPPING_FIELD_DESC); + struct.mapping.write(oprot); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class WMDropMappingRequestTupleSchemeFactory implements SchemeFactory { + public WMDropMappingRequestTupleScheme getScheme() { + return new WMDropMappingRequestTupleScheme(); + } + } + + private static class WMDropMappingRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, WMDropMappingRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetMapping()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetMapping()) { + struct.mapping.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, WMDropMappingRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.mapping = new WMMapping(); + struct.mapping.read(iprot); + struct.setMappingIsSet(true); + } + } + } + +} + diff --git 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropMappingResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropMappingResponse.java new file mode 100644 index 0000000000..f3fd4b3160 --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropMappingResponse.java @@ -0,0 +1,283 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMDropMappingResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMDropMappingResponse"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new WMDropMappingResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new WMDropMappingResponseTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMDropMappingResponse.class, metaDataMap); + } + + public WMDropMappingResponse() { + } + + /** + * Performs a deep copy on other. + */ + public WMDropMappingResponse(WMDropMappingResponse other) { + } + + public WMDropMappingResponse deepCopy() { + return new WMDropMappingResponse(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof WMDropMappingResponse) + return this.equals((WMDropMappingResponse)that); + return false; + } + + public boolean equals(WMDropMappingResponse that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(WMDropMappingResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("WMDropMappingResponse("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException 
te) { + throw new java.io.IOException(te); + } + } + + private static class WMDropMappingResponseStandardSchemeFactory implements SchemeFactory { + public WMDropMappingResponseStandardScheme getScheme() { + return new WMDropMappingResponseStandardScheme(); + } + } + + private static class WMDropMappingResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, WMDropMappingResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, WMDropMappingResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class WMDropMappingResponseTupleSchemeFactory implements SchemeFactory { + public WMDropMappingResponseTupleScheme getScheme() { + return new WMDropMappingResponseTupleScheme(); + } + } + + private static class WMDropMappingResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, WMDropMappingResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, WMDropMappingResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropPoolRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropPoolRequest.java new file mode 100644 index 0000000000..114cdde46a --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropPoolRequest.java @@ -0,0 +1,499 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public 
@org.apache.hadoop.classification.InterfaceStability.Stable public class WMDropPoolRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMDropPoolRequest"); + + private static final org.apache.thrift.protocol.TField RESOURCE_PLAN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("resourcePlanName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField POOL_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("poolPath", org.apache.thrift.protocol.TType.STRING, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new WMDropPoolRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new WMDropPoolRequestTupleSchemeFactory()); + } + + private String resourcePlanName; // optional + private String poolPath; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RESOURCE_PLAN_NAME((short)1, "resourcePlanName"), + POOL_PATH((short)2, "poolPath"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RESOURCE_PLAN_NAME + return RESOURCE_PLAN_NAME; + case 2: // POOL_PATH + return POOL_PATH; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
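[Reviewer note: illustrative only, not part of the generated patch.] WMDropPoolRequest is a plain two-string struct, and the generated equals()/hashCode() compare only the fields that are set, so value-identical requests collapse in hash-based collections. For example (the resource plan and pool names are made up):

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.hadoop.hive.metastore.api.WMDropPoolRequest;

    public class DropPoolSketch {
      public static void main(String[] args) {
        WMDropPoolRequest a = new WMDropPoolRequest();
        a.setResourcePlanName("rp1");
        a.setPoolPath("etl.nightly");

        WMDropPoolRequest b = new WMDropPoolRequest();
        b.setResourcePlanName("rp1");
        b.setPoolPath("etl.nightly");

        Set<WMDropPoolRequest> requests = new HashSet<>();
        requests.add(a);
        requests.add(b);
        System.out.println(requests.size()); // 1: a and b are equal by value
      }
    }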
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final _Fields optionals[] = {_Fields.RESOURCE_PLAN_NAME,_Fields.POOL_PATH}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RESOURCE_PLAN_NAME, new org.apache.thrift.meta_data.FieldMetaData("resourcePlanName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.POOL_PATH, new org.apache.thrift.meta_data.FieldMetaData("poolPath", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMDropPoolRequest.class, metaDataMap); + } + + public WMDropPoolRequest() { + } + + /** + * Performs a deep copy on other. + */ + public WMDropPoolRequest(WMDropPoolRequest other) { + if (other.isSetResourcePlanName()) { + this.resourcePlanName = other.resourcePlanName; + } + if (other.isSetPoolPath()) { + this.poolPath = other.poolPath; + } + } + + public WMDropPoolRequest deepCopy() { + return new WMDropPoolRequest(this); + } + + @Override + public void clear() { + this.resourcePlanName = null; + this.poolPath = null; + } + + public String getResourcePlanName() { + return this.resourcePlanName; + } + + public void setResourcePlanName(String resourcePlanName) { + this.resourcePlanName = resourcePlanName; + } + + public void unsetResourcePlanName() { + this.resourcePlanName = null; + } + + /** Returns true if field resourcePlanName is set (has been assigned a value) and false otherwise */ + public boolean isSetResourcePlanName() { + return this.resourcePlanName != null; + } + + public void setResourcePlanNameIsSet(boolean value) { + if (!value) { + this.resourcePlanName = null; + } + } + + public String getPoolPath() { + return this.poolPath; + } + + public void setPoolPath(String poolPath) { + this.poolPath = poolPath; + } + + public void unsetPoolPath() { + this.poolPath = null; + } + + /** Returns true if field poolPath is set (has been assigned a value) and false otherwise */ + public boolean isSetPoolPath() { + return this.poolPath != null; + } + + public void setPoolPathIsSet(boolean value) { + if (!value) { + this.poolPath = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RESOURCE_PLAN_NAME: + if (value == null) { + unsetResourcePlanName(); + } else { + setResourcePlanName((String)value); + } + break; + + case POOL_PATH: + if (value == null) { + unsetPoolPath(); + } else { + setPoolPath((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RESOURCE_PLAN_NAME: + return getResourcePlanName(); + + case POOL_PATH: + return getPoolPath(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID 
is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case RESOURCE_PLAN_NAME: + return isSetResourcePlanName(); + case POOL_PATH: + return isSetPoolPath(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof WMDropPoolRequest) + return this.equals((WMDropPoolRequest)that); + return false; + } + + public boolean equals(WMDropPoolRequest that) { + if (that == null) + return false; + + boolean this_present_resourcePlanName = true && this.isSetResourcePlanName(); + boolean that_present_resourcePlanName = true && that.isSetResourcePlanName(); + if (this_present_resourcePlanName || that_present_resourcePlanName) { + if (!(this_present_resourcePlanName && that_present_resourcePlanName)) + return false; + if (!this.resourcePlanName.equals(that.resourcePlanName)) + return false; + } + + boolean this_present_poolPath = true && this.isSetPoolPath(); + boolean that_present_poolPath = true && that.isSetPoolPath(); + if (this_present_poolPath || that_present_poolPath) { + if (!(this_present_poolPath && that_present_poolPath)) + return false; + if (!this.poolPath.equals(that.poolPath)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_resourcePlanName = true && (isSetResourcePlanName()); + list.add(present_resourcePlanName); + if (present_resourcePlanName) + list.add(resourcePlanName); + + boolean present_poolPath = true && (isSetPoolPath()); + list.add(present_poolPath); + if (present_poolPath) + list.add(poolPath); + + return list.hashCode(); + } + + @Override + public int compareTo(WMDropPoolRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetResourcePlanName()).compareTo(other.isSetResourcePlanName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetResourcePlanName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.resourcePlanName, other.resourcePlanName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPoolPath()).compareTo(other.isSetPoolPath()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPoolPath()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.poolPath, other.poolPath); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("WMDropPoolRequest("); + boolean first = true; + + if (isSetResourcePlanName()) { + sb.append("resourcePlanName:"); + if (this.resourcePlanName == null) { + sb.append("null"); + } else { + sb.append(this.resourcePlanName); + } + first = false; + } + if (isSetPoolPath()) { + if (!first) sb.append(", "); + sb.append("poolPath:"); + if 
(this.poolPath == null) { + sb.append("null"); + } else { + sb.append(this.poolPath); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class WMDropPoolRequestStandardSchemeFactory implements SchemeFactory { + public WMDropPoolRequestStandardScheme getScheme() { + return new WMDropPoolRequestStandardScheme(); + } + } + + private static class WMDropPoolRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, WMDropPoolRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RESOURCE_PLAN_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.resourcePlanName = iprot.readString(); + struct.setResourcePlanNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // POOL_PATH + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.poolPath = iprot.readString(); + struct.setPoolPathIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, WMDropPoolRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.resourcePlanName != null) { + if (struct.isSetResourcePlanName()) { + oprot.writeFieldBegin(RESOURCE_PLAN_NAME_FIELD_DESC); + oprot.writeString(struct.resourcePlanName); + oprot.writeFieldEnd(); + } + } + if (struct.poolPath != null) { + if (struct.isSetPoolPath()) { + oprot.writeFieldBegin(POOL_PATH_FIELD_DESC); + oprot.writeString(struct.poolPath); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class WMDropPoolRequestTupleSchemeFactory implements SchemeFactory { + public WMDropPoolRequestTupleScheme getScheme() { + return new WMDropPoolRequestTupleScheme(); + } + } + + private static class WMDropPoolRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, WMDropPoolRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetResourcePlanName()) { + optionals.set(0); + } + if (struct.isSetPoolPath()) { + optionals.set(1); 
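[Reviewer note: illustrative only, not part of the generated patch.] The standard scheme above writes tagged fields, while the tuple scheme here writes a leading bitset followed by only the set fields; either way, a round trip through the stock Thrift serializer and deserializer preserves the request. A sketch using the compact protocol, which is also what the writeObject/readObject hooks above delegate to:

    import org.apache.hadoop.hive.metastore.api.WMDropPoolRequest;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    public class RoundTripSketch {
      public static void main(String[] args) throws Exception {
        WMDropPoolRequest request = new WMDropPoolRequest();
        request.setResourcePlanName("rp1");   // made-up names, for illustration
        request.setPoolPath("etl.nightly");

        // Serialize to bytes and back with the compact protocol.
        byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(request);
        WMDropPoolRequest decoded = new WMDropPoolRequest();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(decoded, bytes);

        System.out.println(request.equals(decoded)); // true
      }
    }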
+ } + oprot.writeBitSet(optionals, 2); + if (struct.isSetResourcePlanName()) { + oprot.writeString(struct.resourcePlanName); + } + if (struct.isSetPoolPath()) { + oprot.writeString(struct.poolPath); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, WMDropPoolRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.resourcePlanName = iprot.readString(); + struct.setResourcePlanNameIsSet(true); + } + if (incoming.get(1)) { + struct.poolPath = iprot.readString(); + struct.setPoolPathIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropPoolResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropPoolResponse.java new file mode 100644 index 0000000000..602754c959 --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMDropPoolResponse.java @@ -0,0 +1,283 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class WMDropPoolResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WMDropPoolResponse"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new WMDropPoolResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new WMDropPoolResponseTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMDropPoolResponse.class, metaDataMap); + } + + public WMDropPoolResponse() { + } + + /** + * Performs a deep copy on other. + */ + public WMDropPoolResponse(WMDropPoolResponse other) { + } + + public WMDropPoolResponse deepCopy() { + return new WMDropPoolResponse(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof WMDropPoolResponse) + return this.equals((WMDropPoolResponse)that); + return false; + } + + public boolean equals(WMDropPoolResponse that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(WMDropPoolResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("WMDropPoolResponse("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws 
java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class WMDropPoolResponseStandardSchemeFactory implements SchemeFactory { + public WMDropPoolResponseStandardScheme getScheme() { + return new WMDropPoolResponseStandardScheme(); + } + } + + private static class WMDropPoolResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, WMDropPoolResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, WMDropPoolResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class WMDropPoolResponseTupleSchemeFactory implements SchemeFactory { + public WMDropPoolResponseTupleScheme getScheme() { + return new WMDropPoolResponseTupleScheme(); + } + } + + private static class WMDropPoolResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, WMDropPoolResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, WMDropPoolResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMMapping.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMMapping.java index af7ee52936..8b7d41a85f 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMMapping.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMMapping.java @@ -41,7 +41,7 @@ private static final org.apache.thrift.protocol.TField RESOURCE_PLAN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("resourcePlanName", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField ENTITY_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("entityType", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField ENTITY_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("entityName", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField POOL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("poolName", org.apache.thrift.protocol.TType.STRING, (short)4); + private static 
final org.apache.thrift.protocol.TField POOL_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("poolPath", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField ORDERING_FIELD_DESC = new org.apache.thrift.protocol.TField("ordering", org.apache.thrift.protocol.TType.I32, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); @@ -53,7 +53,7 @@ private String resourcePlanName; // required private String entityType; // required private String entityName; // required - private String poolName; // optional + private String poolPath; // optional private int ordering; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ @@ -61,7 +61,7 @@ RESOURCE_PLAN_NAME((short)1, "resourcePlanName"), ENTITY_TYPE((short)2, "entityType"), ENTITY_NAME((short)3, "entityName"), - POOL_NAME((short)4, "poolName"), + POOL_PATH((short)4, "poolPath"), ORDERING((short)5, "ordering"); private static final Map byName = new HashMap(); @@ -83,8 +83,8 @@ public static _Fields findByThriftId(int fieldId) { return ENTITY_TYPE; case 3: // ENTITY_NAME return ENTITY_NAME; - case 4: // POOL_NAME - return POOL_NAME; + case 4: // POOL_PATH + return POOL_PATH; case 5: // ORDERING return ORDERING; default: @@ -129,7 +129,7 @@ public String getFieldName() { // isset id assignments private static final int __ORDERING_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.POOL_NAME,_Fields.ORDERING}; + private static final _Fields optionals[] = {_Fields.POOL_PATH,_Fields.ORDERING}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -139,7 +139,7 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.ENTITY_NAME, new org.apache.thrift.meta_data.FieldMetaData("entityName", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.POOL_NAME, new org.apache.thrift.meta_data.FieldMetaData("poolName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + tmpMap.put(_Fields.POOL_PATH, new org.apache.thrift.meta_data.FieldMetaData("poolPath", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.ORDERING, new org.apache.thrift.meta_data.FieldMetaData("ordering", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); @@ -175,8 +175,8 @@ public WMMapping(WMMapping other) { if (other.isSetEntityName()) { this.entityName = other.entityName; } - if (other.isSetPoolName()) { - this.poolName = other.poolName; + if (other.isSetPoolPath()) { + this.poolPath = other.poolPath; } this.ordering = other.ordering; } @@ -190,7 +190,7 @@ public void clear() { this.resourcePlanName = null; this.entityType = null; this.entityName = null; - this.poolName = null; + this.poolPath = null; setOrderingIsSet(false); this.ordering = 0; } @@ -264,26 +264,26 @@ public void setEntityNameIsSet(boolean value) { } } - public String getPoolName() { - return this.poolName; + public String 
getPoolPath() { + return this.poolPath; } - public void setPoolName(String poolName) { - this.poolName = poolName; + public void setPoolPath(String poolPath) { + this.poolPath = poolPath; } - public void unsetPoolName() { - this.poolName = null; + public void unsetPoolPath() { + this.poolPath = null; } - /** Returns true if field poolName is set (has been assigned a value) and false otherwise */ - public boolean isSetPoolName() { - return this.poolName != null; + /** Returns true if field poolPath is set (has been assigned a value) and false otherwise */ + public boolean isSetPoolPath() { + return this.poolPath != null; } - public void setPoolNameIsSet(boolean value) { + public void setPoolPathIsSet(boolean value) { if (!value) { - this.poolName = null; + this.poolPath = null; } } @@ -335,11 +335,11 @@ public void setFieldValue(_Fields field, Object value) { } break; - case POOL_NAME: + case POOL_PATH: if (value == null) { - unsetPoolName(); + unsetPoolPath(); } else { - setPoolName((String)value); + setPoolPath((String)value); } break; @@ -365,8 +365,8 @@ public Object getFieldValue(_Fields field) { case ENTITY_NAME: return getEntityName(); - case POOL_NAME: - return getPoolName(); + case POOL_PATH: + return getPoolPath(); case ORDERING: return getOrdering(); @@ -388,8 +388,8 @@ public boolean isSet(_Fields field) { return isSetEntityType(); case ENTITY_NAME: return isSetEntityName(); - case POOL_NAME: - return isSetPoolName(); + case POOL_PATH: + return isSetPoolPath(); case ORDERING: return isSetOrdering(); } @@ -436,12 +436,12 @@ public boolean equals(WMMapping that) { return false; } - boolean this_present_poolName = true && this.isSetPoolName(); - boolean that_present_poolName = true && that.isSetPoolName(); - if (this_present_poolName || that_present_poolName) { - if (!(this_present_poolName && that_present_poolName)) + boolean this_present_poolPath = true && this.isSetPoolPath(); + boolean that_present_poolPath = true && that.isSetPoolPath(); + if (this_present_poolPath || that_present_poolPath) { + if (!(this_present_poolPath && that_present_poolPath)) return false; - if (!this.poolName.equals(that.poolName)) + if (!this.poolPath.equals(that.poolPath)) return false; } @@ -476,10 +476,10 @@ public int hashCode() { if (present_entityName) list.add(entityName); - boolean present_poolName = true && (isSetPoolName()); - list.add(present_poolName); - if (present_poolName) - list.add(poolName); + boolean present_poolPath = true && (isSetPoolPath()); + list.add(present_poolPath); + if (present_poolPath) + list.add(poolPath); boolean present_ordering = true && (isSetOrdering()); list.add(present_ordering); @@ -527,12 +527,12 @@ public int compareTo(WMMapping other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetPoolName()).compareTo(other.isSetPoolName()); + lastComparison = Boolean.valueOf(isSetPoolPath()).compareTo(other.isSetPoolPath()); if (lastComparison != 0) { return lastComparison; } - if (isSetPoolName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.poolName, other.poolName); + if (isSetPoolPath()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.poolPath, other.poolPath); if (lastComparison != 0) { return lastComparison; } @@ -590,13 +590,13 @@ public String toString() { sb.append(this.entityName); } first = false; - if (isSetPoolName()) { + if (isSetPoolPath()) { if (!first) sb.append(", "); - sb.append("poolName:"); - if (this.poolName == null) { + sb.append("poolPath:"); + if (this.poolPath == null) { 
sb.append("null"); } else { - sb.append(this.poolName); + sb.append(this.poolPath); } first = false; } @@ -687,10 +687,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMMapping struct) t org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // POOL_NAME + case 4: // POOL_PATH if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.poolName = iprot.readString(); - struct.setPoolNameIsSet(true); + struct.poolPath = iprot.readString(); + struct.setPoolPathIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -731,10 +731,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMMapping struct) oprot.writeString(struct.entityName); oprot.writeFieldEnd(); } - if (struct.poolName != null) { - if (struct.isSetPoolName()) { - oprot.writeFieldBegin(POOL_NAME_FIELD_DESC); - oprot.writeString(struct.poolName); + if (struct.poolPath != null) { + if (struct.isSetPoolPath()) { + oprot.writeFieldBegin(POOL_PATH_FIELD_DESC); + oprot.writeString(struct.poolPath); oprot.writeFieldEnd(); } } @@ -764,15 +764,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMMapping struct) t oprot.writeString(struct.entityType); oprot.writeString(struct.entityName); BitSet optionals = new BitSet(); - if (struct.isSetPoolName()) { + if (struct.isSetPoolPath()) { optionals.set(0); } if (struct.isSetOrdering()) { optionals.set(1); } oprot.writeBitSet(optionals, 2); - if (struct.isSetPoolName()) { - oprot.writeString(struct.poolName); + if (struct.isSetPoolPath()) { + oprot.writeString(struct.poolPath); } if (struct.isSetOrdering()) { oprot.writeI32(struct.ordering); @@ -790,8 +790,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMMapping struct) th struct.setEntityNameIsSet(true); BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { - struct.poolName = iprot.readString(); - struct.setPoolNameIsSet(true); + struct.poolPath = iprot.readString(); + struct.setPoolPathIsSet(true); } if (incoming.get(1)) { struct.ordering = iprot.readI32(); diff --git standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index e70b41b588..cc5952d1a4 100644 --- standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -1329,6 +1329,49 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @throws \metastore\MetaException */ public function get_triggers_for_resourceplan(\metastore\WMGetTriggersForResourePlanRequest $request); + /** + * @param \metastore\WMCreateOrUpdatePoolRequest $request + * @return \metastore\WMCreateOrUpdatePoolResponse + * @throws \metastore\AlreadyExistsException + * @throws \metastore\NoSuchObjectException + * @throws \metastore\InvalidObjectException + * @throws \metastore\MetaException + */ + public function create_or_update_wm_pool(\metastore\WMCreateOrUpdatePoolRequest $request); + /** + * @param \metastore\WMDropPoolRequest $request + * @return \metastore\WMDropPoolResponse + * @throws \metastore\NoSuchObjectException + * @throws \metastore\InvalidOperationException + * @throws \metastore\MetaException + */ + public function drop_wm_pool(\metastore\WMDropPoolRequest $request); + /** + * @param \metastore\WMCreateOrUpdateMappingRequest $request + * @return \metastore\WMCreateOrUpdateMappingResponse + * @throws 
\metastore\AlreadyExistsException + * @throws \metastore\NoSuchObjectException + * @throws \metastore\InvalidObjectException + * @throws \metastore\MetaException + */ + public function create_or_update_wm_mapping(\metastore\WMCreateOrUpdateMappingRequest $request); + /** + * @param \metastore\WMDropMappingRequest $request + * @return \metastore\WMDropMappingResponse + * @throws \metastore\NoSuchObjectException + * @throws \metastore\InvalidOperationException + * @throws \metastore\MetaException + */ + public function drop_wm_mapping(\metastore\WMDropMappingRequest $request); + /** + * @param \metastore\WMCreateOrDropTriggerToPoolMappingRequest $request + * @return \metastore\WMCreateOrDropTriggerToPoolMappingResponse + * @throws \metastore\AlreadyExistsException + * @throws \metastore\NoSuchObjectException + * @throws \metastore\InvalidObjectException + * @throws \metastore\MetaException + */ + public function create_or_drop_wm_trigger_to_pool_mapping(\metastore\WMCreateOrDropTriggerToPoolMappingRequest $request); } class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf { @@ -11159,196 +11202,326 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_triggers_for_resourceplan failed: unknown result"); } -} - -// HELPER FUNCTIONS AND STRUCTURES + public function create_or_update_wm_pool(\metastore\WMCreateOrUpdatePoolRequest $request) + { + $this->send_create_or_update_wm_pool($request); + return $this->recv_create_or_update_wm_pool(); + } -class ThriftHiveMetastore_getMetaConf_args { - static $_TSPEC; + public function send_create_or_update_wm_pool(\metastore\WMCreateOrUpdatePoolRequest $request) + { + $args = new \metastore\ThriftHiveMetastore_create_or_update_wm_pool_args(); + $args->request = $request; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'create_or_update_wm_pool', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('create_or_update_wm_pool', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } - /** - * @var string - */ - public $key = null; + public function recv_create_or_update_wm_pool() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_create_or_update_wm_pool_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 1 => array( - 'var' => 'key', - 'type' => TType::STRING, - ), - ); - } - if (is_array($vals)) { - if (isset($vals['key'])) { - $this->key = $vals['key']; + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; } + $result = new \metastore\ThriftHiveMetastore_create_or_update_wm_pool_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw 
$result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + if ($result->o4 !== null) { + throw $result->o4; + } + throw new \Exception("create_or_update_wm_pool failed: unknown result"); } - public function getName() { - return 'ThriftHiveMetastore_getMetaConf_args'; + public function drop_wm_pool(\metastore\WMDropPoolRequest $request) + { + $this->send_drop_wm_pool($request); + return $this->recv_drop_wm_pool(); } - public function read($input) + public function send_drop_wm_pool(\metastore\WMDropPoolRequest $request) { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) + $args = new \metastore\ThriftHiveMetastore_drop_wm_pool_args(); + $args->request = $request; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->key); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; + thrift_protocol_write_binary($this->output_, 'drop_wm_pool', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('drop_wm_pool', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_drop_wm_pool() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_drop_wm_pool_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; } - $xfer += $input->readFieldEnd(); + $result = new \metastore\ThriftHiveMetastore_drop_wm_pool_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); } - $xfer += $input->readStructEnd(); - return $xfer; + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + throw new \Exception("drop_wm_pool failed: unknown result"); } - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_args'); - if ($this->key !== null) { - $xfer += $output->writeFieldBegin('key', TType::STRING, 1); - $xfer += $output->writeString($this->key); - $xfer += $output->writeFieldEnd(); + public function create_or_update_wm_mapping(\metastore\WMCreateOrUpdateMappingRequest $request) + { + $this->send_create_or_update_wm_mapping($request); + return $this->recv_create_or_update_wm_mapping(); + } + + public function send_create_or_update_wm_mapping(\metastore\WMCreateOrUpdateMappingRequest $request) + { + $args = new \metastore\ThriftHiveMetastore_create_or_update_wm_mapping_args(); + $args->request = $request; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'create_or_update_wm_mapping', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('create_or_update_wm_mapping', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; } -} + public function recv_create_or_update_wm_mapping() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_create_or_update_wm_mapping_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; -class ThriftHiveMetastore_getMetaConf_result { - static $_TSPEC; + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_create_or_update_wm_mapping_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + if ($result->o4 !== null) { + throw $result->o4; + } + throw new \Exception("create_or_update_wm_mapping failed: unknown result"); + } - /** - * @var string - */ - public $success = null; - /** - * @var \metastore\MetaException - */ - public $o1 = null; + public function drop_wm_mapping(\metastore\WMDropMappingRequest $request) + { + $this->send_drop_wm_mapping($request); + return $this->recv_drop_wm_mapping(); + } - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 0 => array( - 'var' => 'success', - 'type' => TType::STRING, - ), - 1 => array( - 'var' => 'o1', - 'type' => TType::STRUCT, - 'class' => '\metastore\MetaException', - ), - ); + public function send_drop_wm_mapping(\metastore\WMDropMappingRequest $request) + { + $args = new \metastore\ThriftHiveMetastore_drop_wm_mapping_args(); + $args->request = $request; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'drop_wm_mapping', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); } - if (is_array($vals)) { - if (isset($vals['success'])) { - $this->success = $vals['success']; - } - if (isset($vals['o1'])) { - $this->o1 = $vals['o1']; + else + { + $this->output_->writeMessageBegin('drop_wm_mapping', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_drop_wm_mapping() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_drop_wm_mapping_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype 
= 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; } + $result = new \metastore\ThriftHiveMetastore_drop_wm_mapping_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; } + if ($result->o3 !== null) { + throw $result->o3; + } + throw new \Exception("drop_wm_mapping failed: unknown result"); } - public function getName() { - return 'ThriftHiveMetastore_getMetaConf_result'; + public function create_or_drop_wm_trigger_to_pool_mapping(\metastore\WMCreateOrDropTriggerToPoolMappingRequest $request) + { + $this->send_create_or_drop_wm_trigger_to_pool_mapping($request); + return $this->recv_create_or_drop_wm_trigger_to_pool_mapping(); } - public function read($input) + public function send_create_or_drop_wm_trigger_to_pool_mapping(\metastore\WMCreateOrDropTriggerToPoolMappingRequest $request) { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) + $args = new \metastore\ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args(); + $args->request = $request; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 0: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->success); - } else { - $xfer += $input->skip($ftype); - } - break; - case 1: - if ($ftype == TType::STRUCT) { - $this->o1 = new \metastore\MetaException(); - $xfer += $this->o1->read($input); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); + thrift_protocol_write_binary($this->output_, 'create_or_drop_wm_trigger_to_pool_mapping', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('create_or_drop_wm_trigger_to_pool_mapping', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); } - $xfer += $input->readStructEnd(); - return $xfer; } - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_result'); - if ($this->success !== null) { - $xfer += $output->writeFieldBegin('success', TType::STRING, 0); - $xfer += $output->writeString($this->success); - $xfer += $output->writeFieldEnd(); + public function recv_create_or_drop_wm_trigger_to_pool_mapping() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new 
\metastore\ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); } - if ($this->o1 !== null) { - $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); - $xfer += $this->o1->write($output); - $xfer += $output->writeFieldEnd(); + if ($result->success !== null) { + return $result->success; } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + if ($result->o4 !== null) { + throw $result->o4; + } + throw new \Exception("create_or_drop_wm_trigger_to_pool_mapping failed: unknown result"); } } -class ThriftHiveMetastore_setMetaConf_args { +// HELPER FUNCTIONS AND STRUCTURES + +class ThriftHiveMetastore_getMetaConf_args { static $_TSPEC; /** * @var string */ public $key = null; - /** - * @var string - */ - public $value = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -11357,24 +11530,203 @@ class ThriftHiveMetastore_setMetaConf_args { 'var' => 'key', 'type' => TType::STRING, ), - 2 => array( - 'var' => 'value', - 'type' => TType::STRING, - ), ); } if (is_array($vals)) { if (isset($vals['key'])) { $this->key = $vals['key']; } - if (isset($vals['value'])) { - $this->value = $vals['value']; - } } } public function getName() { - return 'ThriftHiveMetastore_setMetaConf_args'; + return 'ThriftHiveMetastore_getMetaConf_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->key); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_args'); + if ($this->key !== null) { + $xfer += $output->writeFieldBegin('key', TType::STRING, 1); + $xfer += $output->writeString($this->key); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_getMetaConf_result { + static $_TSPEC; + + /** + * @var string + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRING, + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_getMetaConf_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: 
+ if ($ftype == TType::STRING) { + $xfer += $input->readString($this->success); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_result'); + if ($this->success !== null) { + $xfer += $output->writeFieldBegin('success', TType::STRING, 0); + $xfer += $output->writeString($this->success); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_setMetaConf_args { + static $_TSPEC; + + /** + * @var string + */ + public $key = null; + /** + * @var string + */ + public $value = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'key', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'value', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['key'])) { + $this->key = $vals['key']; + } + if (isset($vals['value'])) { + $this->value = $vals['value']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_setMetaConf_args'; } public function read($input) @@ -48387,11 +48739,1271 @@ class ThriftHiveMetastore_create_resource_plan_result { } -class ThriftHiveMetastore_get_resource_plan_args { +class ThriftHiveMetastore_get_resource_plan_args { + static $_TSPEC; + + /** + * @var \metastore\WMGetResourcePlanRequest + */ + public $request = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'request', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMGetResourcePlanRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['request'])) { + $this->request = $vals['request']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_resource_plan_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->request = new \metastore\WMGetResourcePlanRequest(); + $xfer += $this->request->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_resource_plan_args'); + if ($this->request !== null) { + if (!is_object($this->request)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1); + $xfer += $this->request->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer 
+= $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_resource_plan_result { + static $_TSPEC; + + /** + * @var \metastore\WMGetResourcePlanResponse + */ + public $success = null; + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\MetaException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMGetResourcePlanResponse', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_resource_plan_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\WMGetResourcePlanResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_resource_plan_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_active_resource_plan_args { + static $_TSPEC; + + /** + * @var \metastore\WMGetActiveResourcePlanRequest + */ + public $request = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'request', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMGetActiveResourcePlanRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['request'])) { + $this->request = $vals['request']; + 
} + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_active_resource_plan_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->request = new \metastore\WMGetActiveResourcePlanRequest(); + $xfer += $this->request->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_active_resource_plan_args'); + if ($this->request !== null) { + if (!is_object($this->request)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1); + $xfer += $this->request->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_active_resource_plan_result { + static $_TSPEC; + + /** + * @var \metastore\WMGetActiveResourcePlanResponse + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMGetActiveResourcePlanResponse', + ), + 1 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_active_resource_plan_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\WMGetActiveResourcePlanResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_active_resource_plan_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 1); + $xfer += $this->o2->write($output); 
+ $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_all_resource_plans_args { + static $_TSPEC; + + /** + * @var \metastore\WMGetAllResourcePlanRequest + */ + public $request = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'request', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMGetAllResourcePlanRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['request'])) { + $this->request = $vals['request']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_all_resource_plans_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->request = new \metastore\WMGetAllResourcePlanRequest(); + $xfer += $this->request->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_resource_plans_args'); + if ($this->request !== null) { + if (!is_object($this->request)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1); + $xfer += $this->request->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_all_resource_plans_result { + static $_TSPEC; + + /** + * @var \metastore\WMGetAllResourcePlanResponse + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMGetAllResourcePlanResponse', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_all_resource_plans_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\WMGetAllResourcePlanResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += 
$input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_resource_plans_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_alter_resource_plan_args { + static $_TSPEC; + + /** + * @var \metastore\WMAlterResourcePlanRequest + */ + public $request = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'request', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMAlterResourcePlanRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['request'])) { + $this->request = $vals['request']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_alter_resource_plan_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->request = new \metastore\WMAlterResourcePlanRequest(); + $xfer += $this->request->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_resource_plan_args'); + if ($this->request !== null) { + if (!is_object($this->request)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1); + $xfer += $this->request->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_alter_resource_plan_result { + static $_TSPEC; + + /** + * @var \metastore\WMAlterResourcePlanResponse + */ + public $success = null; + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\InvalidOperationException + */ + public $o2 = null; + /** + * @var \metastore\MetaException + */ + public $o3 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMAlterResourcePlanResponse', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidOperationException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { 
+ $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_alter_resource_plan_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\WMAlterResourcePlanResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\InvalidOperationException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_resource_plan_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_validate_resource_plan_args { + static $_TSPEC; + + /** + * @var \metastore\WMValidateResourcePlanRequest + */ + public $request = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'request', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMValidateResourcePlanRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['request'])) { + $this->request = $vals['request']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_validate_resource_plan_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->request = new 
\metastore\WMValidateResourcePlanRequest(); + $xfer += $this->request->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_validate_resource_plan_args'); + if ($this->request !== null) { + if (!is_object($this->request)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1); + $xfer += $this->request->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_validate_resource_plan_result { + static $_TSPEC; + + /** + * @var \metastore\WMValidateResourcePlanResponse + */ + public $success = null; + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\MetaException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMValidateResourcePlanResponse', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_validate_resource_plan_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\WMValidateResourcePlanResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_validate_resource_plan_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += 
$output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_resource_plan_args { + static $_TSPEC; + + /** + * @var \metastore\WMDropResourcePlanRequest + */ + public $request = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'request', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMDropResourcePlanRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['request'])) { + $this->request = $vals['request']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_resource_plan_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->request = new \metastore\WMDropResourcePlanRequest(); + $xfer += $this->request->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_resource_plan_args'); + if ($this->request !== null) { + if (!is_object($this->request)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1); + $xfer += $this->request->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_resource_plan_result { + static $_TSPEC; + + /** + * @var \metastore\WMDropResourcePlanResponse + */ + public $success = null; + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\InvalidOperationException + */ + public $o2 = null; + /** + * @var \metastore\MetaException + */ + public $o3 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMDropResourcePlanResponse', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidOperationException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_resource_plan_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + 
$xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\WMDropResourcePlanResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\InvalidOperationException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_resource_plan_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_create_wm_trigger_args { static $_TSPEC; /** - * @var \metastore\WMGetResourcePlanRequest + * @var \metastore\WMCreateTriggerRequest */ public $request = null; @@ -48401,7 +50013,7 @@ class ThriftHiveMetastore_get_resource_plan_args { 1 => array( 'var' => 'request', 'type' => TType::STRUCT, - 'class' => '\metastore\WMGetResourcePlanRequest', + 'class' => '\metastore\WMCreateTriggerRequest', ), ); } @@ -48413,7 +50025,7 @@ class ThriftHiveMetastore_get_resource_plan_args { } public function getName() { - return 'ThriftHiveMetastore_get_resource_plan_args'; + return 'ThriftHiveMetastore_create_wm_trigger_args'; } public function read($input) @@ -48433,7 +50045,7 @@ class ThriftHiveMetastore_get_resource_plan_args { { case 1: if ($ftype == TType::STRUCT) { - $this->request = new \metastore\WMGetResourcePlanRequest(); + $this->request = new \metastore\WMCreateTriggerRequest(); $xfer += $this->request->read($input); } else { $xfer += $input->skip($ftype); @@ -48451,7 +50063,7 @@ class ThriftHiveMetastore_get_resource_plan_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_resource_plan_args'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_wm_trigger_args'); if ($this->request !== null) { if (!is_object($this->request)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -48467,21 
+50079,29 @@ class ThriftHiveMetastore_get_resource_plan_args { } -class ThriftHiveMetastore_get_resource_plan_result { +class ThriftHiveMetastore_create_wm_trigger_result { static $_TSPEC; /** - * @var \metastore\WMGetResourcePlanResponse + * @var \metastore\WMCreateTriggerResponse */ public $success = null; /** - * @var \metastore\NoSuchObjectException + * @var \metastore\AlreadyExistsException */ public $o1 = null; /** - * @var \metastore\MetaException + * @var \metastore\NoSuchObjectException */ public $o2 = null; + /** + * @var \metastore\InvalidObjectException + */ + public $o3 = null; + /** + * @var \metastore\MetaException + */ + public $o4 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -48489,16 +50109,26 @@ class ThriftHiveMetastore_get_resource_plan_result { 0 => array( 'var' => 'success', 'type' => TType::STRUCT, - 'class' => '\metastore\WMGetResourcePlanResponse', + 'class' => '\metastore\WMCreateTriggerResponse', ), 1 => array( 'var' => 'o1', 'type' => TType::STRUCT, - 'class' => '\metastore\NoSuchObjectException', + 'class' => '\metastore\AlreadyExistsException', ), 2 => array( 'var' => 'o2', 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidObjectException', + ), + 4 => array( + 'var' => 'o4', + 'type' => TType::STRUCT, 'class' => '\metastore\MetaException', ), ); @@ -48513,11 +50143,17 @@ class ThriftHiveMetastore_get_resource_plan_result { if (isset($vals['o2'])) { $this->o2 = $vals['o2']; } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + if (isset($vals['o4'])) { + $this->o4 = $vals['o4']; + } } } public function getName() { - return 'ThriftHiveMetastore_get_resource_plan_result'; + return 'ThriftHiveMetastore_create_wm_trigger_result'; } public function read($input) @@ -48537,7 +50173,7 @@ class ThriftHiveMetastore_get_resource_plan_result { { case 0: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\WMGetResourcePlanResponse(); + $this->success = new \metastore\WMCreateTriggerResponse(); $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); @@ -48545,7 +50181,7 @@ class ThriftHiveMetastore_get_resource_plan_result { break; case 1: if ($ftype == TType::STRUCT) { - $this->o1 = new \metastore\NoSuchObjectException(); + $this->o1 = new \metastore\AlreadyExistsException(); $xfer += $this->o1->read($input); } else { $xfer += $input->skip($ftype); @@ -48553,12 +50189,28 @@ class ThriftHiveMetastore_get_resource_plan_result { break; case 2: if ($ftype == TType::STRUCT) { - $this->o2 = new \metastore\MetaException(); + $this->o2 = new \metastore\NoSuchObjectException(); $xfer += $this->o2->read($input); } else { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\InvalidObjectException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRUCT) { + $this->o4 = new \metastore\MetaException(); + $xfer += $this->o4->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -48571,7 +50223,7 @@ class ThriftHiveMetastore_get_resource_plan_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_resource_plan_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_wm_trigger_result'); if ($this->success !== 
null) { if (!is_object($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -48590,6 +50242,16 @@ class ThriftHiveMetastore_get_resource_plan_result { $xfer += $this->o2->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o4 !== null) { + $xfer += $output->writeFieldBegin('o4', TType::STRUCT, 4); + $xfer += $this->o4->write($output); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -48597,11 +50259,11 @@ class ThriftHiveMetastore_get_resource_plan_result { } -class ThriftHiveMetastore_get_active_resource_plan_args { +class ThriftHiveMetastore_alter_wm_trigger_args { static $_TSPEC; /** - * @var \metastore\WMGetActiveResourcePlanRequest + * @var \metastore\WMAlterTriggerRequest */ public $request = null; @@ -48611,7 +50273,7 @@ class ThriftHiveMetastore_get_active_resource_plan_args { 1 => array( 'var' => 'request', 'type' => TType::STRUCT, - 'class' => '\metastore\WMGetActiveResourcePlanRequest', + 'class' => '\metastore\WMAlterTriggerRequest', ), ); } @@ -48623,7 +50285,7 @@ class ThriftHiveMetastore_get_active_resource_plan_args { } public function getName() { - return 'ThriftHiveMetastore_get_active_resource_plan_args'; + return 'ThriftHiveMetastore_alter_wm_trigger_args'; } public function read($input) @@ -48643,7 +50305,7 @@ class ThriftHiveMetastore_get_active_resource_plan_args { { case 1: if ($ftype == TType::STRUCT) { - $this->request = new \metastore\WMGetActiveResourcePlanRequest(); + $this->request = new \metastore\WMAlterTriggerRequest(); $xfer += $this->request->read($input); } else { $xfer += $input->skip($ftype); @@ -48661,7 +50323,7 @@ class ThriftHiveMetastore_get_active_resource_plan_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_active_resource_plan_args'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_wm_trigger_args'); if ($this->request !== null) { if (!is_object($this->request)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -48677,17 +50339,25 @@ class ThriftHiveMetastore_get_active_resource_plan_args { } -class ThriftHiveMetastore_get_active_resource_plan_result { +class ThriftHiveMetastore_alter_wm_trigger_result { static $_TSPEC; /** - * @var \metastore\WMGetActiveResourcePlanResponse + * @var \metastore\WMAlterTriggerResponse */ public $success = null; /** - * @var \metastore\MetaException + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\InvalidObjectException */ public $o2 = null; + /** + * @var \metastore\MetaException + */ + public $o3 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -48695,11 +50365,21 @@ class ThriftHiveMetastore_get_active_resource_plan_result { 0 => array( 'var' => 'success', 'type' => TType::STRUCT, - 'class' => '\metastore\WMGetActiveResourcePlanResponse', + 'class' => '\metastore\WMAlterTriggerResponse', ), 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( 'var' => 'o2', 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidObjectException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, 'class' => 
'\metastore\MetaException', ), ); @@ -48708,14 +50388,20 @@ class ThriftHiveMetastore_get_active_resource_plan_result { if (isset($vals['success'])) { $this->success = $vals['success']; } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } if (isset($vals['o2'])) { $this->o2 = $vals['o2']; } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } } } public function getName() { - return 'ThriftHiveMetastore_get_active_resource_plan_result'; + return 'ThriftHiveMetastore_alter_wm_trigger_result'; } public function read($input) @@ -48735,7 +50421,7 @@ class ThriftHiveMetastore_get_active_resource_plan_result { { case 0: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\WMGetActiveResourcePlanResponse(); + $this->success = new \metastore\WMAlterTriggerResponse(); $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); @@ -48743,193 +50429,24 @@ class ThriftHiveMetastore_get_active_resource_plan_result { break; case 1: if ($ftype == TType::STRUCT) { - $this->o2 = new \metastore\MetaException(); - $xfer += $this->o2->read($input); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); - } - $xfer += $input->readStructEnd(); - return $xfer; - } - - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_active_resource_plan_result'); - if ($this->success !== null) { - if (!is_object($this->success)) { - throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); - } - $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); - $xfer += $this->success->write($output); - $xfer += $output->writeFieldEnd(); - } - if ($this->o2 !== null) { - $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 1); - $xfer += $this->o2->write($output); - $xfer += $output->writeFieldEnd(); - } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; - } - -} - -class ThriftHiveMetastore_get_all_resource_plans_args { - static $_TSPEC; - - /** - * @var \metastore\WMGetAllResourcePlanRequest - */ - public $request = null; - - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 1 => array( - 'var' => 'request', - 'type' => TType::STRUCT, - 'class' => '\metastore\WMGetAllResourcePlanRequest', - ), - ); - } - if (is_array($vals)) { - if (isset($vals['request'])) { - $this->request = $vals['request']; - } - } - } - - public function getName() { - return 'ThriftHiveMetastore_get_all_resource_plans_args'; - } - - public function read($input) - { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) - { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 1: - if ($ftype == TType::STRUCT) { - $this->request = new \metastore\WMGetAllResourcePlanRequest(); - $xfer += $this->request->read($input); + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); } else { $xfer += $input->skip($ftype); } break; - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); - } - $xfer += $input->readStructEnd(); - return $xfer; - } - - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_resource_plans_args'); - if ($this->request !== null) { - if (!is_object($this->request)) { - 
throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); - } - $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1); - $xfer += $this->request->write($output); - $xfer += $output->writeFieldEnd(); - } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; - } - -} - -class ThriftHiveMetastore_get_all_resource_plans_result { - static $_TSPEC; - - /** - * @var \metastore\WMGetAllResourcePlanResponse - */ - public $success = null; - /** - * @var \metastore\MetaException - */ - public $o1 = null; - - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 0 => array( - 'var' => 'success', - 'type' => TType::STRUCT, - 'class' => '\metastore\WMGetAllResourcePlanResponse', - ), - 1 => array( - 'var' => 'o1', - 'type' => TType::STRUCT, - 'class' => '\metastore\MetaException', - ), - ); - } - if (is_array($vals)) { - if (isset($vals['success'])) { - $this->success = $vals['success']; - } - if (isset($vals['o1'])) { - $this->o1 = $vals['o1']; - } - } - } - - public function getName() { - return 'ThriftHiveMetastore_get_all_resource_plans_result'; - } - - public function read($input) - { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) - { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 0: + case 2: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\WMGetAllResourcePlanResponse(); - $xfer += $this->success->read($input); + $this->o2 = new \metastore\InvalidObjectException(); + $xfer += $this->o2->read($input); } else { $xfer += $input->skip($ftype); } break; - case 1: + case 3: if ($ftype == TType::STRUCT) { - $this->o1 = new \metastore\MetaException(); - $xfer += $this->o1->read($input); + $this->o3 = new \metastore\MetaException(); + $xfer += $this->o3->read($input); } else { $xfer += $input->skip($ftype); } @@ -48946,7 +50463,7 @@ class ThriftHiveMetastore_get_all_resource_plans_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_resource_plans_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_wm_trigger_result'); if ($this->success !== null) { if (!is_object($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -48960,6 +50477,16 @@ class ThriftHiveMetastore_get_all_resource_plans_result { $xfer += $this->o1->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -48967,11 +50494,11 @@ class ThriftHiveMetastore_get_all_resource_plans_result { } -class ThriftHiveMetastore_alter_resource_plan_args { +class ThriftHiveMetastore_drop_wm_trigger_args { static $_TSPEC; /** - * @var \metastore\WMAlterResourcePlanRequest + * @var \metastore\WMDropTriggerRequest */ public $request = null; @@ -48981,7 +50508,7 @@ class ThriftHiveMetastore_alter_resource_plan_args { 1 => array( 'var' => 'request', 'type' => TType::STRUCT, - 'class' => 
'\metastore\WMAlterResourcePlanRequest', + 'class' => '\metastore\WMDropTriggerRequest', ), ); } @@ -48993,7 +50520,7 @@ class ThriftHiveMetastore_alter_resource_plan_args { } public function getName() { - return 'ThriftHiveMetastore_alter_resource_plan_args'; + return 'ThriftHiveMetastore_drop_wm_trigger_args'; } public function read($input) @@ -49013,7 +50540,7 @@ class ThriftHiveMetastore_alter_resource_plan_args { { case 1: if ($ftype == TType::STRUCT) { - $this->request = new \metastore\WMAlterResourcePlanRequest(); + $this->request = new \metastore\WMDropTriggerRequest(); $xfer += $this->request->read($input); } else { $xfer += $input->skip($ftype); @@ -49031,7 +50558,7 @@ class ThriftHiveMetastore_alter_resource_plan_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_resource_plan_args'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_wm_trigger_args'); if ($this->request !== null) { if (!is_object($this->request)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -49047,11 +50574,11 @@ class ThriftHiveMetastore_alter_resource_plan_args { } -class ThriftHiveMetastore_alter_resource_plan_result { +class ThriftHiveMetastore_drop_wm_trigger_result { static $_TSPEC; /** - * @var \metastore\WMAlterResourcePlanResponse + * @var \metastore\WMDropTriggerResponse */ public $success = null; /** @@ -49073,7 +50600,7 @@ class ThriftHiveMetastore_alter_resource_plan_result { 0 => array( 'var' => 'success', 'type' => TType::STRUCT, - 'class' => '\metastore\WMAlterResourcePlanResponse', + 'class' => '\metastore\WMDropTriggerResponse', ), 1 => array( 'var' => 'o1', @@ -49109,7 +50636,7 @@ class ThriftHiveMetastore_alter_resource_plan_result { } public function getName() { - return 'ThriftHiveMetastore_alter_resource_plan_result'; + return 'ThriftHiveMetastore_drop_wm_trigger_result'; } public function read($input) @@ -49129,7 +50656,7 @@ class ThriftHiveMetastore_alter_resource_plan_result { { case 0: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\WMAlterResourcePlanResponse(); + $this->success = new \metastore\WMDropTriggerResponse(); $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); @@ -49171,7 +50698,7 @@ class ThriftHiveMetastore_alter_resource_plan_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_resource_plan_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_wm_trigger_result'); if ($this->success !== null) { if (!is_object($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -49202,11 +50729,11 @@ class ThriftHiveMetastore_alter_resource_plan_result { } -class ThriftHiveMetastore_validate_resource_plan_args { +class ThriftHiveMetastore_get_triggers_for_resourceplan_args { static $_TSPEC; /** - * @var \metastore\WMValidateResourcePlanRequest + * @var \metastore\WMGetTriggersForResourePlanRequest */ public $request = null; @@ -49216,7 +50743,7 @@ class ThriftHiveMetastore_validate_resource_plan_args { 1 => array( 'var' => 'request', 'type' => TType::STRUCT, - 'class' => '\metastore\WMValidateResourcePlanRequest', + 'class' => '\metastore\WMGetTriggersForResourePlanRequest', ), ); } @@ -49228,7 +50755,7 @@ class ThriftHiveMetastore_validate_resource_plan_args { } public function getName() { - return 'ThriftHiveMetastore_validate_resource_plan_args'; + return 
'ThriftHiveMetastore_get_triggers_for_resourceplan_args'; } public function read($input) @@ -49248,7 +50775,7 @@ class ThriftHiveMetastore_validate_resource_plan_args { { case 1: if ($ftype == TType::STRUCT) { - $this->request = new \metastore\WMValidateResourcePlanRequest(); + $this->request = new \metastore\WMGetTriggersForResourePlanRequest(); $xfer += $this->request->read($input); } else { $xfer += $input->skip($ftype); @@ -49266,7 +50793,7 @@ class ThriftHiveMetastore_validate_resource_plan_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_validate_resource_plan_args'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_triggers_for_resourceplan_args'); if ($this->request !== null) { if (!is_object($this->request)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -49282,11 +50809,11 @@ class ThriftHiveMetastore_validate_resource_plan_args { } -class ThriftHiveMetastore_validate_resource_plan_result { +class ThriftHiveMetastore_get_triggers_for_resourceplan_result { static $_TSPEC; /** - * @var \metastore\WMValidateResourcePlanResponse + * @var \metastore\WMGetTriggersForResourePlanResponse */ public $success = null; /** @@ -49304,7 +50831,7 @@ class ThriftHiveMetastore_validate_resource_plan_result { 0 => array( 'var' => 'success', 'type' => TType::STRUCT, - 'class' => '\metastore\WMValidateResourcePlanResponse', + 'class' => '\metastore\WMGetTriggersForResourePlanResponse', ), 1 => array( 'var' => 'o1', @@ -49332,7 +50859,7 @@ class ThriftHiveMetastore_validate_resource_plan_result { } public function getName() { - return 'ThriftHiveMetastore_validate_resource_plan_result'; + return 'ThriftHiveMetastore_get_triggers_for_resourceplan_result'; } public function read($input) @@ -49352,7 +50879,7 @@ class ThriftHiveMetastore_validate_resource_plan_result { { case 0: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\WMValidateResourcePlanResponse(); + $this->success = new \metastore\WMGetTriggersForResourePlanResponse(); $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); @@ -49386,7 +50913,7 @@ class ThriftHiveMetastore_validate_resource_plan_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_validate_resource_plan_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_triggers_for_resourceplan_result'); if ($this->success !== null) { if (!is_object($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -49412,11 +50939,11 @@ class ThriftHiveMetastore_validate_resource_plan_result { } -class ThriftHiveMetastore_drop_resource_plan_args { +class ThriftHiveMetastore_create_or_update_wm_pool_args { static $_TSPEC; /** - * @var \metastore\WMDropResourcePlanRequest + * @var \metastore\WMCreateOrUpdatePoolRequest */ public $request = null; @@ -49426,7 +50953,7 @@ class ThriftHiveMetastore_drop_resource_plan_args { 1 => array( 'var' => 'request', 'type' => TType::STRUCT, - 'class' => '\metastore\WMDropResourcePlanRequest', + 'class' => '\metastore\WMCreateOrUpdatePoolRequest', ), ); } @@ -49438,7 +50965,7 @@ class ThriftHiveMetastore_drop_resource_plan_args { } public function getName() { - return 'ThriftHiveMetastore_drop_resource_plan_args'; + return 'ThriftHiveMetastore_create_or_update_wm_pool_args'; } public function read($input) @@ -49458,7 +50985,7 @@ class ThriftHiveMetastore_drop_resource_plan_args { 
{ case 1: if ($ftype == TType::STRUCT) { - $this->request = new \metastore\WMDropResourcePlanRequest(); + $this->request = new \metastore\WMCreateOrUpdatePoolRequest(); $xfer += $this->request->read($input); } else { $xfer += $input->skip($ftype); @@ -49476,7 +51003,7 @@ class ThriftHiveMetastore_drop_resource_plan_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_resource_plan_args'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_or_update_wm_pool_args'); if ($this->request !== null) { if (!is_object($this->request)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -49492,25 +51019,29 @@ class ThriftHiveMetastore_drop_resource_plan_args { } -class ThriftHiveMetastore_drop_resource_plan_result { +class ThriftHiveMetastore_create_or_update_wm_pool_result { static $_TSPEC; /** - * @var \metastore\WMDropResourcePlanResponse + * @var \metastore\WMCreateOrUpdatePoolResponse */ public $success = null; /** - * @var \metastore\NoSuchObjectException + * @var \metastore\AlreadyExistsException */ public $o1 = null; /** - * @var \metastore\InvalidOperationException + * @var \metastore\NoSuchObjectException */ public $o2 = null; /** - * @var \metastore\MetaException + * @var \metastore\InvalidObjectException */ public $o3 = null; + /** + * @var \metastore\MetaException + */ + public $o4 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -49518,21 +51049,26 @@ class ThriftHiveMetastore_drop_resource_plan_result { 0 => array( 'var' => 'success', 'type' => TType::STRUCT, - 'class' => '\metastore\WMDropResourcePlanResponse', + 'class' => '\metastore\WMCreateOrUpdatePoolResponse', ), 1 => array( 'var' => 'o1', 'type' => TType::STRUCT, - 'class' => '\metastore\NoSuchObjectException', + 'class' => '\metastore\AlreadyExistsException', ), 2 => array( 'var' => 'o2', 'type' => TType::STRUCT, - 'class' => '\metastore\InvalidOperationException', + 'class' => '\metastore\NoSuchObjectException', ), 3 => array( 'var' => 'o3', 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidObjectException', + ), + 4 => array( + 'var' => 'o4', + 'type' => TType::STRUCT, 'class' => '\metastore\MetaException', ), ); @@ -49550,11 +51086,14 @@ class ThriftHiveMetastore_drop_resource_plan_result { if (isset($vals['o3'])) { $this->o3 = $vals['o3']; } + if (isset($vals['o4'])) { + $this->o4 = $vals['o4']; + } } } public function getName() { - return 'ThriftHiveMetastore_drop_resource_plan_result'; + return 'ThriftHiveMetastore_create_or_update_wm_pool_result'; } public function read($input) @@ -49574,7 +51113,7 @@ class ThriftHiveMetastore_drop_resource_plan_result { { case 0: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\WMDropResourcePlanResponse(); + $this->success = new \metastore\WMCreateOrUpdatePoolResponse(); $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); @@ -49582,7 +51121,7 @@ class ThriftHiveMetastore_drop_resource_plan_result { break; case 1: if ($ftype == TType::STRUCT) { - $this->o1 = new \metastore\NoSuchObjectException(); + $this->o1 = new \metastore\AlreadyExistsException(); $xfer += $this->o1->read($input); } else { $xfer += $input->skip($ftype); @@ -49590,7 +51129,7 @@ class ThriftHiveMetastore_drop_resource_plan_result { break; case 2: if ($ftype == TType::STRUCT) { - $this->o2 = new \metastore\InvalidOperationException(); + $this->o2 = new \metastore\NoSuchObjectException(); $xfer += 
$this->o2->read($input); } else { $xfer += $input->skip($ftype); @@ -49598,12 +51137,20 @@ class ThriftHiveMetastore_drop_resource_plan_result { break; case 3: if ($ftype == TType::STRUCT) { - $this->o3 = new \metastore\MetaException(); + $this->o3 = new \metastore\InvalidObjectException(); $xfer += $this->o3->read($input); } else { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRUCT) { + $this->o4 = new \metastore\MetaException(); + $xfer += $this->o4->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -49616,7 +51163,7 @@ class ThriftHiveMetastore_drop_resource_plan_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_resource_plan_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_or_update_wm_pool_result'); if ($this->success !== null) { if (!is_object($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -49640,6 +51187,11 @@ class ThriftHiveMetastore_drop_resource_plan_result { $xfer += $this->o3->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->o4 !== null) { + $xfer += $output->writeFieldBegin('o4', TType::STRUCT, 4); + $xfer += $this->o4->write($output); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -49647,11 +51199,11 @@ class ThriftHiveMetastore_drop_resource_plan_result { } -class ThriftHiveMetastore_create_wm_trigger_args { +class ThriftHiveMetastore_drop_wm_pool_args { static $_TSPEC; /** - * @var \metastore\WMCreateTriggerRequest + * @var \metastore\WMDropPoolRequest */ public $request = null; @@ -49661,7 +51213,7 @@ class ThriftHiveMetastore_create_wm_trigger_args { 1 => array( 'var' => 'request', 'type' => TType::STRUCT, - 'class' => '\metastore\WMCreateTriggerRequest', + 'class' => '\metastore\WMDropPoolRequest', ), ); } @@ -49673,7 +51225,7 @@ class ThriftHiveMetastore_create_wm_trigger_args { } public function getName() { - return 'ThriftHiveMetastore_create_wm_trigger_args'; + return 'ThriftHiveMetastore_drop_wm_pool_args'; } public function read($input) @@ -49693,7 +51245,7 @@ class ThriftHiveMetastore_create_wm_trigger_args { { case 1: if ($ftype == TType::STRUCT) { - $this->request = new \metastore\WMCreateTriggerRequest(); + $this->request = new \metastore\WMDropPoolRequest(); $xfer += $this->request->read($input); } else { $xfer += $input->skip($ftype); @@ -49711,7 +51263,7 @@ class ThriftHiveMetastore_create_wm_trigger_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_wm_trigger_args'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_wm_pool_args'); if ($this->request !== null) { if (!is_object($this->request)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -49727,29 +51279,25 @@ class ThriftHiveMetastore_create_wm_trigger_args { } -class ThriftHiveMetastore_create_wm_trigger_result { +class ThriftHiveMetastore_drop_wm_pool_result { static $_TSPEC; /** - * @var \metastore\WMCreateTriggerResponse + * @var \metastore\WMDropPoolResponse */ public $success = null; /** - * @var \metastore\AlreadyExistsException + * @var \metastore\NoSuchObjectException */ public $o1 = null; /** - * @var \metastore\NoSuchObjectException + * @var \metastore\InvalidOperationException */ public $o2 = null; - /** - * @var 
\metastore\InvalidObjectException - */ - public $o3 = null; /** * @var \metastore\MetaException */ - public $o4 = null; + public $o3 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -49757,26 +51305,21 @@ class ThriftHiveMetastore_create_wm_trigger_result { 0 => array( 'var' => 'success', 'type' => TType::STRUCT, - 'class' => '\metastore\WMCreateTriggerResponse', + 'class' => '\metastore\WMDropPoolResponse', ), 1 => array( 'var' => 'o1', 'type' => TType::STRUCT, - 'class' => '\metastore\AlreadyExistsException', + 'class' => '\metastore\NoSuchObjectException', ), 2 => array( 'var' => 'o2', 'type' => TType::STRUCT, - 'class' => '\metastore\NoSuchObjectException', + 'class' => '\metastore\InvalidOperationException', ), 3 => array( 'var' => 'o3', 'type' => TType::STRUCT, - 'class' => '\metastore\InvalidObjectException', - ), - 4 => array( - 'var' => 'o4', - 'type' => TType::STRUCT, 'class' => '\metastore\MetaException', ), ); @@ -49794,14 +51337,11 @@ class ThriftHiveMetastore_create_wm_trigger_result { if (isset($vals['o3'])) { $this->o3 = $vals['o3']; } - if (isset($vals['o4'])) { - $this->o4 = $vals['o4']; - } } } public function getName() { - return 'ThriftHiveMetastore_create_wm_trigger_result'; + return 'ThriftHiveMetastore_drop_wm_pool_result'; } public function read($input) @@ -49821,7 +51361,7 @@ class ThriftHiveMetastore_create_wm_trigger_result { { case 0: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\WMCreateTriggerResponse(); + $this->success = new \metastore\WMDropPoolResponse(); $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); @@ -49829,7 +51369,7 @@ class ThriftHiveMetastore_create_wm_trigger_result { break; case 1: if ($ftype == TType::STRUCT) { - $this->o1 = new \metastore\AlreadyExistsException(); + $this->o1 = new \metastore\NoSuchObjectException(); $xfer += $this->o1->read($input); } else { $xfer += $input->skip($ftype); @@ -49837,7 +51377,7 @@ class ThriftHiveMetastore_create_wm_trigger_result { break; case 2: if ($ftype == TType::STRUCT) { - $this->o2 = new \metastore\NoSuchObjectException(); + $this->o2 = new \metastore\InvalidOperationException(); $xfer += $this->o2->read($input); } else { $xfer += $input->skip($ftype); @@ -49845,20 +51385,12 @@ class ThriftHiveMetastore_create_wm_trigger_result { break; case 3: if ($ftype == TType::STRUCT) { - $this->o3 = new \metastore\InvalidObjectException(); + $this->o3 = new \metastore\MetaException(); $xfer += $this->o3->read($input); } else { $xfer += $input->skip($ftype); } break; - case 4: - if ($ftype == TType::STRUCT) { - $this->o4 = new \metastore\MetaException(); - $xfer += $this->o4->read($input); - } else { - $xfer += $input->skip($ftype); - } - break; default: $xfer += $input->skip($ftype); break; @@ -49871,7 +51403,7 @@ class ThriftHiveMetastore_create_wm_trigger_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_wm_trigger_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_wm_pool_result'); if ($this->success !== null) { if (!is_object($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -49895,11 +51427,6 @@ class ThriftHiveMetastore_create_wm_trigger_result { $xfer += $this->o3->write($output); $xfer += $output->writeFieldEnd(); } - if ($this->o4 !== null) { - $xfer += $output->writeFieldBegin('o4', TType::STRUCT, 4); - $xfer += $this->o4->write($output); - $xfer += 
$output->writeFieldEnd(); - } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -49907,11 +51434,11 @@ class ThriftHiveMetastore_create_wm_trigger_result { } -class ThriftHiveMetastore_alter_wm_trigger_args { +class ThriftHiveMetastore_create_or_update_wm_mapping_args { static $_TSPEC; /** - * @var \metastore\WMAlterTriggerRequest + * @var \metastore\WMCreateOrUpdateMappingRequest */ public $request = null; @@ -49921,7 +51448,7 @@ class ThriftHiveMetastore_alter_wm_trigger_args { 1 => array( 'var' => 'request', 'type' => TType::STRUCT, - 'class' => '\metastore\WMAlterTriggerRequest', + 'class' => '\metastore\WMCreateOrUpdateMappingRequest', ), ); } @@ -49933,7 +51460,7 @@ class ThriftHiveMetastore_alter_wm_trigger_args { } public function getName() { - return 'ThriftHiveMetastore_alter_wm_trigger_args'; + return 'ThriftHiveMetastore_create_or_update_wm_mapping_args'; } public function read($input) @@ -49953,7 +51480,7 @@ class ThriftHiveMetastore_alter_wm_trigger_args { { case 1: if ($ftype == TType::STRUCT) { - $this->request = new \metastore\WMAlterTriggerRequest(); + $this->request = new \metastore\WMCreateOrUpdateMappingRequest(); $xfer += $this->request->read($input); } else { $xfer += $input->skip($ftype); @@ -49971,7 +51498,7 @@ class ThriftHiveMetastore_alter_wm_trigger_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_wm_trigger_args'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_or_update_wm_mapping_args'); if ($this->request !== null) { if (!is_object($this->request)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -49987,25 +51514,29 @@ class ThriftHiveMetastore_alter_wm_trigger_args { } -class ThriftHiveMetastore_alter_wm_trigger_result { +class ThriftHiveMetastore_create_or_update_wm_mapping_result { static $_TSPEC; /** - * @var \metastore\WMAlterTriggerResponse + * @var \metastore\WMCreateOrUpdateMappingResponse */ public $success = null; /** - * @var \metastore\NoSuchObjectException + * @var \metastore\AlreadyExistsException */ public $o1 = null; /** - * @var \metastore\InvalidObjectException + * @var \metastore\NoSuchObjectException */ public $o2 = null; /** - * @var \metastore\MetaException + * @var \metastore\InvalidObjectException */ public $o3 = null; + /** + * @var \metastore\MetaException + */ + public $o4 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -50013,21 +51544,26 @@ class ThriftHiveMetastore_alter_wm_trigger_result { 0 => array( 'var' => 'success', 'type' => TType::STRUCT, - 'class' => '\metastore\WMAlterTriggerResponse', + 'class' => '\metastore\WMCreateOrUpdateMappingResponse', ), 1 => array( 'var' => 'o1', 'type' => TType::STRUCT, - 'class' => '\metastore\NoSuchObjectException', + 'class' => '\metastore\AlreadyExistsException', ), 2 => array( 'var' => 'o2', 'type' => TType::STRUCT, - 'class' => '\metastore\InvalidObjectException', + 'class' => '\metastore\NoSuchObjectException', ), 3 => array( 'var' => 'o3', 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidObjectException', + ), + 4 => array( + 'var' => 'o4', + 'type' => TType::STRUCT, 'class' => '\metastore\MetaException', ), ); @@ -50045,11 +51581,14 @@ class ThriftHiveMetastore_alter_wm_trigger_result { if (isset($vals['o3'])) { $this->o3 = $vals['o3']; } + if (isset($vals['o4'])) { + $this->o4 = $vals['o4']; + } } } public function getName() { - return 
'ThriftHiveMetastore_alter_wm_trigger_result'; + return 'ThriftHiveMetastore_create_or_update_wm_mapping_result'; } public function read($input) @@ -50069,7 +51608,7 @@ class ThriftHiveMetastore_alter_wm_trigger_result { { case 0: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\WMAlterTriggerResponse(); + $this->success = new \metastore\WMCreateOrUpdateMappingResponse(); $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); @@ -50077,7 +51616,7 @@ class ThriftHiveMetastore_alter_wm_trigger_result { break; case 1: if ($ftype == TType::STRUCT) { - $this->o1 = new \metastore\NoSuchObjectException(); + $this->o1 = new \metastore\AlreadyExistsException(); $xfer += $this->o1->read($input); } else { $xfer += $input->skip($ftype); @@ -50085,7 +51624,7 @@ class ThriftHiveMetastore_alter_wm_trigger_result { break; case 2: if ($ftype == TType::STRUCT) { - $this->o2 = new \metastore\InvalidObjectException(); + $this->o2 = new \metastore\NoSuchObjectException(); $xfer += $this->o2->read($input); } else { $xfer += $input->skip($ftype); @@ -50093,12 +51632,20 @@ class ThriftHiveMetastore_alter_wm_trigger_result { break; case 3: if ($ftype == TType::STRUCT) { - $this->o3 = new \metastore\MetaException(); + $this->o3 = new \metastore\InvalidObjectException(); $xfer += $this->o3->read($input); } else { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRUCT) { + $this->o4 = new \metastore\MetaException(); + $xfer += $this->o4->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -50111,7 +51658,7 @@ class ThriftHiveMetastore_alter_wm_trigger_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_wm_trigger_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_or_update_wm_mapping_result'); if ($this->success !== null) { if (!is_object($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -50135,6 +51682,11 @@ class ThriftHiveMetastore_alter_wm_trigger_result { $xfer += $this->o3->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->o4 !== null) { + $xfer += $output->writeFieldBegin('o4', TType::STRUCT, 4); + $xfer += $this->o4->write($output); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -50142,11 +51694,11 @@ class ThriftHiveMetastore_alter_wm_trigger_result { } -class ThriftHiveMetastore_drop_wm_trigger_args { +class ThriftHiveMetastore_drop_wm_mapping_args { static $_TSPEC; /** - * @var \metastore\WMDropTriggerRequest + * @var \metastore\WMDropMappingRequest */ public $request = null; @@ -50156,7 +51708,7 @@ class ThriftHiveMetastore_drop_wm_trigger_args { 1 => array( 'var' => 'request', 'type' => TType::STRUCT, - 'class' => '\metastore\WMDropTriggerRequest', + 'class' => '\metastore\WMDropMappingRequest', ), ); } @@ -50168,7 +51720,7 @@ class ThriftHiveMetastore_drop_wm_trigger_args { } public function getName() { - return 'ThriftHiveMetastore_drop_wm_trigger_args'; + return 'ThriftHiveMetastore_drop_wm_mapping_args'; } public function read($input) @@ -50188,7 +51740,7 @@ class ThriftHiveMetastore_drop_wm_trigger_args { { case 1: if ($ftype == TType::STRUCT) { - $this->request = new \metastore\WMDropTriggerRequest(); + $this->request = new \metastore\WMDropMappingRequest(); $xfer += $this->request->read($input); } else { $xfer += 
$input->skip($ftype); @@ -50206,7 +51758,7 @@ class ThriftHiveMetastore_drop_wm_trigger_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_wm_trigger_args'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_wm_mapping_args'); if ($this->request !== null) { if (!is_object($this->request)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -50222,11 +51774,11 @@ class ThriftHiveMetastore_drop_wm_trigger_args { } -class ThriftHiveMetastore_drop_wm_trigger_result { +class ThriftHiveMetastore_drop_wm_mapping_result { static $_TSPEC; /** - * @var \metastore\WMDropTriggerResponse + * @var \metastore\WMDropMappingResponse */ public $success = null; /** @@ -50248,7 +51800,7 @@ class ThriftHiveMetastore_drop_wm_trigger_result { 0 => array( 'var' => 'success', 'type' => TType::STRUCT, - 'class' => '\metastore\WMDropTriggerResponse', + 'class' => '\metastore\WMDropMappingResponse', ), 1 => array( 'var' => 'o1', @@ -50284,7 +51836,7 @@ class ThriftHiveMetastore_drop_wm_trigger_result { } public function getName() { - return 'ThriftHiveMetastore_drop_wm_trigger_result'; + return 'ThriftHiveMetastore_drop_wm_mapping_result'; } public function read($input) @@ -50304,7 +51856,7 @@ class ThriftHiveMetastore_drop_wm_trigger_result { { case 0: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\WMDropTriggerResponse(); + $this->success = new \metastore\WMDropMappingResponse(); $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); @@ -50346,7 +51898,7 @@ class ThriftHiveMetastore_drop_wm_trigger_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_wm_trigger_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_wm_mapping_result'); if ($this->success !== null) { if (!is_object($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -50377,11 +51929,11 @@ class ThriftHiveMetastore_drop_wm_trigger_result { } -class ThriftHiveMetastore_get_triggers_for_resourceplan_args { +class ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args { static $_TSPEC; /** - * @var \metastore\WMGetTriggersForResourePlanRequest + * @var \metastore\WMCreateOrDropTriggerToPoolMappingRequest */ public $request = null; @@ -50391,7 +51943,7 @@ class ThriftHiveMetastore_get_triggers_for_resourceplan_args { 1 => array( 'var' => 'request', 'type' => TType::STRUCT, - 'class' => '\metastore\WMGetTriggersForResourePlanRequest', + 'class' => '\metastore\WMCreateOrDropTriggerToPoolMappingRequest', ), ); } @@ -50403,7 +51955,7 @@ class ThriftHiveMetastore_get_triggers_for_resourceplan_args { } public function getName() { - return 'ThriftHiveMetastore_get_triggers_for_resourceplan_args'; + return 'ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args'; } public function read($input) @@ -50423,7 +51975,7 @@ class ThriftHiveMetastore_get_triggers_for_resourceplan_args { { case 1: if ($ftype == TType::STRUCT) { - $this->request = new \metastore\WMGetTriggersForResourePlanRequest(); + $this->request = new \metastore\WMCreateOrDropTriggerToPoolMappingRequest(); $xfer += $this->request->read($input); } else { $xfer += $input->skip($ftype); @@ -50441,7 +51993,7 @@ class ThriftHiveMetastore_get_triggers_for_resourceplan_args { public function write($output) { $xfer = 0; - $xfer += 
$output->writeStructBegin('ThriftHiveMetastore_get_triggers_for_resourceplan_args'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_args'); if ($this->request !== null) { if (!is_object($this->request)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -50457,21 +52009,29 @@ class ThriftHiveMetastore_get_triggers_for_resourceplan_args { } -class ThriftHiveMetastore_get_triggers_for_resourceplan_result { +class ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result { static $_TSPEC; /** - * @var \metastore\WMGetTriggersForResourePlanResponse + * @var \metastore\WMCreateOrDropTriggerToPoolMappingResponse */ public $success = null; /** - * @var \metastore\NoSuchObjectException + * @var \metastore\AlreadyExistsException */ public $o1 = null; /** - * @var \metastore\MetaException + * @var \metastore\NoSuchObjectException */ public $o2 = null; + /** + * @var \metastore\InvalidObjectException + */ + public $o3 = null; + /** + * @var \metastore\MetaException + */ + public $o4 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -50479,16 +52039,26 @@ class ThriftHiveMetastore_get_triggers_for_resourceplan_result { 0 => array( 'var' => 'success', 'type' => TType::STRUCT, - 'class' => '\metastore\WMGetTriggersForResourePlanResponse', + 'class' => '\metastore\WMCreateOrDropTriggerToPoolMappingResponse', ), 1 => array( 'var' => 'o1', 'type' => TType::STRUCT, - 'class' => '\metastore\NoSuchObjectException', + 'class' => '\metastore\AlreadyExistsException', ), 2 => array( 'var' => 'o2', 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidObjectException', + ), + 4 => array( + 'var' => 'o4', + 'type' => TType::STRUCT, 'class' => '\metastore\MetaException', ), ); @@ -50503,11 +52073,17 @@ class ThriftHiveMetastore_get_triggers_for_resourceplan_result { if (isset($vals['o2'])) { $this->o2 = $vals['o2']; } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + if (isset($vals['o4'])) { + $this->o4 = $vals['o4']; + } } } public function getName() { - return 'ThriftHiveMetastore_get_triggers_for_resourceplan_result'; + return 'ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result'; } public function read($input) @@ -50527,7 +52103,7 @@ class ThriftHiveMetastore_get_triggers_for_resourceplan_result { { case 0: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\WMGetTriggersForResourePlanResponse(); + $this->success = new \metastore\WMCreateOrDropTriggerToPoolMappingResponse(); $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); @@ -50535,7 +52111,7 @@ class ThriftHiveMetastore_get_triggers_for_resourceplan_result { break; case 1: if ($ftype == TType::STRUCT) { - $this->o1 = new \metastore\NoSuchObjectException(); + $this->o1 = new \metastore\AlreadyExistsException(); $xfer += $this->o1->read($input); } else { $xfer += $input->skip($ftype); @@ -50543,12 +52119,28 @@ class ThriftHiveMetastore_get_triggers_for_resourceplan_result { break; case 2: if ($ftype == TType::STRUCT) { - $this->o2 = new \metastore\MetaException(); + $this->o2 = new \metastore\NoSuchObjectException(); $xfer += $this->o2->read($input); } else { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\InvalidObjectException(); + $xfer += $this->o3->read($input); + } else { 
+ $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRUCT) { + $this->o4 = new \metastore\MetaException(); + $xfer += $this->o4->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -50561,7 +52153,7 @@ class ThriftHiveMetastore_get_triggers_for_resourceplan_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_triggers_for_resourceplan_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_result'); if ($this->success !== null) { if (!is_object($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -50580,6 +52172,16 @@ class ThriftHiveMetastore_get_triggers_for_resourceplan_result { $xfer += $this->o2->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o4 !== null) { + $xfer += $output->writeFieldBegin('o4', TType::STRUCT, 4); + $xfer += $this->o4->write($output); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php index 8dc556b2f1..150b68fa8f 100644 --- standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php +++ standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -20916,7 +20916,7 @@ class WMMapping { /** * @var string */ - public $poolName = null; + public $poolPath = null; /** * @var int */ @@ -20938,7 +20938,7 @@ class WMMapping { 'type' => TType::STRING, ), 4 => array( - 'var' => 'poolName', + 'var' => 'poolPath', 'type' => TType::STRING, ), 5 => array( @@ -20957,8 +20957,8 @@ class WMMapping { if (isset($vals['entityName'])) { $this->entityName = $vals['entityName']; } - if (isset($vals['poolName'])) { - $this->poolName = $vals['poolName']; + if (isset($vals['poolPath'])) { + $this->poolPath = $vals['poolPath']; } if (isset($vals['ordering'])) { $this->ordering = $vals['ordering']; @@ -21008,7 +21008,7 @@ class WMMapping { break; case 4: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->poolName); + $xfer += $input->readString($this->poolPath); } else { $xfer += $input->skip($ftype); } @@ -21048,9 +21048,9 @@ class WMMapping { $xfer += $output->writeString($this->entityName); $xfer += $output->writeFieldEnd(); } - if ($this->poolName !== null) { - $xfer += $output->writeFieldBegin('poolName', TType::STRING, 4); - $xfer += $output->writeString($this->poolName); + if ($this->poolPath !== null) { + $xfer += $output->writeFieldBegin('poolPath', TType::STRING, 4); + $xfer += $output->writeString($this->poolPath); $xfer += $output->writeFieldEnd(); } if ($this->ordering !== null) { @@ -23082,6 +23082,807 @@ class WMGetTriggersForResourePlanResponse { } +class WMCreateOrUpdatePoolRequest { + static $_TSPEC; + + /** + * @var \metastore\WMPool + */ + public $pool = null; + /** + * @var string + */ + public $newPoolPath = null; + /** + * @var bool + */ + public $update = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'pool', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMPool', + ), + 2 => array( + 'var' => 
'newPoolPath', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'update', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['pool'])) { + $this->pool = $vals['pool']; + } + if (isset($vals['newPoolPath'])) { + $this->newPoolPath = $vals['newPoolPath']; + } + if (isset($vals['update'])) { + $this->update = $vals['update']; + } + } + } + + public function getName() { + return 'WMCreateOrUpdatePoolRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->pool = new \metastore\WMPool(); + $xfer += $this->pool->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->newPoolPath); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->update); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('WMCreateOrUpdatePoolRequest'); + if ($this->pool !== null) { + if (!is_object($this->pool)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('pool', TType::STRUCT, 1); + $xfer += $this->pool->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->newPoolPath !== null) { + $xfer += $output->writeFieldBegin('newPoolPath', TType::STRING, 2); + $xfer += $output->writeString($this->newPoolPath); + $xfer += $output->writeFieldEnd(); + } + if ($this->update !== null) { + $xfer += $output->writeFieldBegin('update', TType::BOOL, 3); + $xfer += $output->writeBool($this->update); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class WMCreateOrUpdatePoolResponse { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'WMCreateOrUpdatePoolResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('WMCreateOrUpdatePoolResponse'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class WMDropPoolRequest { + static $_TSPEC; + + /** + * @var string + */ + public $resourcePlanName = null; + /** + * @var string + */ + public $poolPath = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'resourcePlanName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'poolPath', + 'type' => 
TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['resourcePlanName'])) { + $this->resourcePlanName = $vals['resourcePlanName']; + } + if (isset($vals['poolPath'])) { + $this->poolPath = $vals['poolPath']; + } + } + } + + public function getName() { + return 'WMDropPoolRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->resourcePlanName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->poolPath); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('WMDropPoolRequest'); + if ($this->resourcePlanName !== null) { + $xfer += $output->writeFieldBegin('resourcePlanName', TType::STRING, 1); + $xfer += $output->writeString($this->resourcePlanName); + $xfer += $output->writeFieldEnd(); + } + if ($this->poolPath !== null) { + $xfer += $output->writeFieldBegin('poolPath', TType::STRING, 2); + $xfer += $output->writeString($this->poolPath); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class WMDropPoolResponse { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'WMDropPoolResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('WMDropPoolResponse'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class WMCreateOrUpdateMappingRequest { + static $_TSPEC; + + /** + * @var \metastore\WMMapping + */ + public $mapping = null; + /** + * @var bool + */ + public $update = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'mapping', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMMapping', + ), + 2 => array( + 'var' => 'update', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['mapping'])) { + $this->mapping = $vals['mapping']; + } + if (isset($vals['update'])) { + $this->update = $vals['update']; + } + } + } + + public function getName() { + return 'WMCreateOrUpdateMappingRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) 
{ + $this->mapping = new \metastore\WMMapping(); + $xfer += $this->mapping->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->update); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('WMCreateOrUpdateMappingRequest'); + if ($this->mapping !== null) { + if (!is_object($this->mapping)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('mapping', TType::STRUCT, 1); + $xfer += $this->mapping->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->update !== null) { + $xfer += $output->writeFieldBegin('update', TType::BOOL, 2); + $xfer += $output->writeBool($this->update); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class WMCreateOrUpdateMappingResponse { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'WMCreateOrUpdateMappingResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('WMCreateOrUpdateMappingResponse'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class WMDropMappingRequest { + static $_TSPEC; + + /** + * @var \metastore\WMMapping + */ + public $mapping = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'mapping', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMMapping', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['mapping'])) { + $this->mapping = $vals['mapping']; + } + } + } + + public function getName() { + return 'WMDropMappingRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->mapping = new \metastore\WMMapping(); + $xfer += $this->mapping->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('WMDropMappingRequest'); + if ($this->mapping !== null) { + if (!is_object($this->mapping)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('mapping', TType::STRUCT, 1); + $xfer += 
$this->mapping->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class WMDropMappingResponse { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'WMDropMappingResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('WMDropMappingResponse'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class WMCreateOrDropTriggerToPoolMappingRequest { + static $_TSPEC; + + /** + * @var string + */ + public $resourcePlanName = null; + /** + * @var string + */ + public $triggerName = null; + /** + * @var string + */ + public $poolPath = null; + /** + * @var bool + */ + public $drop = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'resourcePlanName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'triggerName', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'poolPath', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'drop', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['resourcePlanName'])) { + $this->resourcePlanName = $vals['resourcePlanName']; + } + if (isset($vals['triggerName'])) { + $this->triggerName = $vals['triggerName']; + } + if (isset($vals['poolPath'])) { + $this->poolPath = $vals['poolPath']; + } + if (isset($vals['drop'])) { + $this->drop = $vals['drop']; + } + } + } + + public function getName() { + return 'WMCreateOrDropTriggerToPoolMappingRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->resourcePlanName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->triggerName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->poolPath); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->drop); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('WMCreateOrDropTriggerToPoolMappingRequest'); + if ($this->resourcePlanName !== null) { + $xfer += $output->writeFieldBegin('resourcePlanName', TType::STRING, 1); + $xfer += $output->writeString($this->resourcePlanName); + $xfer += $output->writeFieldEnd(); + 
} + if ($this->triggerName !== null) { + $xfer += $output->writeFieldBegin('triggerName', TType::STRING, 2); + $xfer += $output->writeString($this->triggerName); + $xfer += $output->writeFieldEnd(); + } + if ($this->poolPath !== null) { + $xfer += $output->writeFieldBegin('poolPath', TType::STRING, 3); + $xfer += $output->writeString($this->poolPath); + $xfer += $output->writeFieldEnd(); + } + if ($this->drop !== null) { + $xfer += $output->writeFieldBegin('drop', TType::BOOL, 4); + $xfer += $output->writeBool($this->drop); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class WMCreateOrDropTriggerToPoolMappingResponse { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'WMCreateOrDropTriggerToPoolMappingResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('WMCreateOrDropTriggerToPoolMappingResponse'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class MetaException extends TException { static $_TSPEC; diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index 9cc9d1c148..c0a4dbb0b0 100755 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -197,6 +197,11 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' WMAlterTriggerResponse alter_wm_trigger(WMAlterTriggerRequest request)') print(' WMDropTriggerResponse drop_wm_trigger(WMDropTriggerRequest request)') print(' WMGetTriggersForResourePlanResponse get_triggers_for_resourceplan(WMGetTriggersForResourePlanRequest request)') + print(' WMCreateOrUpdatePoolResponse create_or_update_wm_pool(WMCreateOrUpdatePoolRequest request)') + print(' WMDropPoolResponse drop_wm_pool(WMDropPoolRequest request)') + print(' WMCreateOrUpdateMappingResponse create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request)') + print(' WMDropMappingResponse drop_wm_mapping(WMDropMappingRequest request)') + print(' WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request)') print(' string getName()') print(' string getVersion()') print(' fb_status getStatus()') @@ -1304,6 +1309,36 @@ elif cmd == 'get_triggers_for_resourceplan': sys.exit(1) pp.pprint(client.get_triggers_for_resourceplan(eval(args[0]),)) +elif cmd == 'create_or_update_wm_pool': + if len(args) != 1: + print('create_or_update_wm_pool requires 1 args') + sys.exit(1) + pp.pprint(client.create_or_update_wm_pool(eval(args[0]),)) + +elif cmd == 'drop_wm_pool': + if len(args) != 1: + print('drop_wm_pool requires 1 args') + sys.exit(1) + pp.pprint(client.drop_wm_pool(eval(args[0]),)) + +elif cmd == 'create_or_update_wm_mapping': + if 
len(args) != 1: + print('create_or_update_wm_mapping requires 1 args') + sys.exit(1) + pp.pprint(client.create_or_update_wm_mapping(eval(args[0]),)) + +elif cmd == 'drop_wm_mapping': + if len(args) != 1: + print('drop_wm_mapping requires 1 args') + sys.exit(1) + pp.pprint(client.drop_wm_mapping(eval(args[0]),)) + +elif cmd == 'create_or_drop_wm_trigger_to_pool_mapping': + if len(args) != 1: + print('create_or_drop_wm_trigger_to_pool_mapping requires 1 args') + sys.exit(1) + pp.pprint(client.create_or_drop_wm_trigger_to_pool_mapping(eval(args[0]),)) + elif cmd == 'getName': if len(args) != 0: print('getName requires 0 args') diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 0351cd8e99..10b4a20d78 100644 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -1370,6 +1370,41 @@ def get_triggers_for_resourceplan(self, request): """ pass + def create_or_update_wm_pool(self, request): + """ + Parameters: + - request + """ + pass + + def drop_wm_pool(self, request): + """ + Parameters: + - request + """ + pass + + def create_or_update_wm_mapping(self, request): + """ + Parameters: + - request + """ + pass + + def drop_wm_mapping(self, request): + """ + Parameters: + - request + """ + pass + + def create_or_drop_wm_trigger_to_pool_mapping(self, request): + """ + Parameters: + - request + """ + pass + class Client(fb303.FacebookService.Client, Iface): """ @@ -7599,6 +7634,197 @@ def recv_get_triggers_for_resourceplan(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_triggers_for_resourceplan failed: unknown result") + def create_or_update_wm_pool(self, request): + """ + Parameters: + - request + """ + self.send_create_or_update_wm_pool(request) + return self.recv_create_or_update_wm_pool() + + def send_create_or_update_wm_pool(self, request): + self._oprot.writeMessageBegin('create_or_update_wm_pool', TMessageType.CALL, self._seqid) + args = create_or_update_wm_pool_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_or_update_wm_pool(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_or_update_wm_pool_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "create_or_update_wm_pool failed: unknown result") + + def drop_wm_pool(self, request): + """ + Parameters: + - request + """ + self.send_drop_wm_pool(request) + return self.recv_drop_wm_pool() + + def send_drop_wm_pool(self, request): + self._oprot.writeMessageBegin('drop_wm_pool', TMessageType.CALL, self._seqid) + args = drop_wm_pool_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_wm_pool(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = 
TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_wm_pool_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_wm_pool failed: unknown result") + + def create_or_update_wm_mapping(self, request): + """ + Parameters: + - request + """ + self.send_create_or_update_wm_mapping(request) + return self.recv_create_or_update_wm_mapping() + + def send_create_or_update_wm_mapping(self, request): + self._oprot.writeMessageBegin('create_or_update_wm_mapping', TMessageType.CALL, self._seqid) + args = create_or_update_wm_mapping_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_or_update_wm_mapping(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_or_update_wm_mapping_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "create_or_update_wm_mapping failed: unknown result") + + def drop_wm_mapping(self, request): + """ + Parameters: + - request + """ + self.send_drop_wm_mapping(request) + return self.recv_drop_wm_mapping() + + def send_drop_wm_mapping(self, request): + self._oprot.writeMessageBegin('drop_wm_mapping', TMessageType.CALL, self._seqid) + args = drop_wm_mapping_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_wm_mapping(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_wm_mapping_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_wm_mapping failed: unknown result") + + def create_or_drop_wm_trigger_to_pool_mapping(self, request): + """ + Parameters: + - request + """ + self.send_create_or_drop_wm_trigger_to_pool_mapping(request) + return self.recv_create_or_drop_wm_trigger_to_pool_mapping() + + def send_create_or_drop_wm_trigger_to_pool_mapping(self, request): + self._oprot.writeMessageBegin('create_or_drop_wm_trigger_to_pool_mapping', TMessageType.CALL, self._seqid) + args = create_or_drop_wm_trigger_to_pool_mapping_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_or_drop_wm_trigger_to_pool_mapping(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = 
create_or_drop_wm_trigger_to_pool_mapping_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "create_or_drop_wm_trigger_to_pool_mapping failed: unknown result") + class Processor(fb303.FacebookService.Processor, Iface, TProcessor): def __init__(self, handler): @@ -7776,6 +8002,11 @@ def __init__(self, handler): self._processMap["alter_wm_trigger"] = Processor.process_alter_wm_trigger self._processMap["drop_wm_trigger"] = Processor.process_drop_wm_trigger self._processMap["get_triggers_for_resourceplan"] = Processor.process_get_triggers_for_resourceplan + self._processMap["create_or_update_wm_pool"] = Processor.process_create_or_update_wm_pool + self._processMap["drop_wm_pool"] = Processor.process_drop_wm_pool + self._processMap["create_or_update_wm_mapping"] = Processor.process_create_or_update_wm_mapping + self._processMap["drop_wm_mapping"] = Processor.process_drop_wm_mapping + self._processMap["create_or_drop_wm_trigger_to_pool_mapping"] = Processor.process_create_or_drop_wm_trigger_to_pool_mapping def process(self, iprot, oprot): (name, type, seqid) = iprot.readMessageBegin() @@ -12027,6 +12258,155 @@ def process_get_triggers_for_resourceplan(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_create_or_update_wm_pool(self, seqid, iprot, oprot): + args = create_or_update_wm_pool_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_or_update_wm_pool_result() + try: + result.success = self._handler.create_or_update_wm_pool(args.request) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except InvalidObjectException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except MetaException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("create_or_update_wm_pool", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_wm_pool(self, seqid, iprot, oprot): + args = drop_wm_pool_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_wm_pool_result() + try: + result.success = self._handler.drop_wm_pool(args.request) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("drop_wm_pool", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_create_or_update_wm_mapping(self, 
seqid, iprot, oprot): + args = create_or_update_wm_mapping_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_or_update_wm_mapping_result() + try: + result.success = self._handler.create_or_update_wm_mapping(args.request) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except InvalidObjectException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except MetaException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("create_or_update_wm_mapping", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_wm_mapping(self, seqid, iprot, oprot): + args = drop_wm_mapping_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_wm_mapping_result() + try: + result.success = self._handler.drop_wm_mapping(args.request) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("drop_wm_mapping", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_create_or_drop_wm_trigger_to_pool_mapping(self, seqid, iprot, oprot): + args = create_or_drop_wm_trigger_to_pool_mapping_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_or_drop_wm_trigger_to_pool_mapping_result() + try: + result.success = self._handler.create_or_drop_wm_trigger_to_pool_mapping(args.request) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except InvalidObjectException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except MetaException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("create_or_drop_wm_trigger_to_pool_mapping", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + # HELPER FUNCTIONS AND STRUCTURES @@ -31465,22 +31845,1276 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_function_args: +class get_function_args: + """ + Attributes: + - dbName + - funcName + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'dbName', None, None, ), # 1 + (2, TType.STRING, 'funcName', None, None, ), # 2 + ) + + def __init__(self, dbName=None, funcName=None,): + self.dbName = dbName + self.funcName = funcName + 
+ def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.funcName = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_function_args') + if self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.funcName is not None: + oprot.writeFieldBegin('funcName', TType.STRING, 2) + oprot.writeString(self.funcName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.funcName) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_function_result: + """ + Attributes: + - success + - o1 + - o2 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (Function, Function.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2 + ) + + def __init__(self, success=None, o1=None, o2=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Function() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_function_result') + if self.success is not None: + 
oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_all_functions_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_all_functions_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_all_functions_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (GetAllFunctionsResponse, GetAllFunctionsResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetAllFunctionsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not 
None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_all_functions_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class create_role_args: + """ + Attributes: + - role + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'role', (Role, Role.thrift_spec), None, ), # 1 + ) + + def __init__(self, role=None,): + self.role = role + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.role = Role() + self.role.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('create_role_args') + if self.role is not None: + oprot.writeFieldBegin('role', TType.STRUCT, 1) + self.role.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.role) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class create_role_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.BOOL, 'success', None, None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + 
iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('create_role_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class drop_role_args: + """ + Attributes: + - role_name + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'role_name', None, None, ), # 1 + ) + + def __init__(self, role_name=None,): + self.role_name = role_name + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.role_name = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('drop_role_args') + if self.role_name is not None: + oprot.writeFieldBegin('role_name', TType.STRING, 1) + oprot.writeString(self.role_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.role_name) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class drop_role_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.BOOL, 'success', None, None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, 
TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('drop_role_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_role_names_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_role_names_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_role_names_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and 
fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1120, _size1117) = iprot.readListBegin() + for _i1121 in xrange(_size1117): + _elem1122 = iprot.readString() + self.success.append(_elem1122) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_role_names_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1123 in self.success: + oprot.writeString(iter1123) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class grant_role_args: + """ + Attributes: + - role_name + - principal_name + - principal_type + - grantor + - grantorType + - grant_option + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'role_name', None, None, ), # 1 + (2, TType.STRING, 'principal_name', None, None, ), # 2 + (3, TType.I32, 'principal_type', None, None, ), # 3 + (4, TType.STRING, 'grantor', None, None, ), # 4 + (5, TType.I32, 'grantorType', None, None, ), # 5 + (6, TType.BOOL, 'grant_option', None, None, ), # 6 + ) + + def __init__(self, role_name=None, principal_name=None, principal_type=None, grantor=None, grantorType=None, grant_option=None,): + self.role_name = role_name + self.principal_name = principal_name + self.principal_type = principal_type + self.grantor = grantor + self.grantorType = grantorType + self.grant_option = grant_option + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.role_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.principal_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.principal_type = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: 
+ self.grantor = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.grantorType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.BOOL: + self.grant_option = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('grant_role_args') + if self.role_name is not None: + oprot.writeFieldBegin('role_name', TType.STRING, 1) + oprot.writeString(self.role_name) + oprot.writeFieldEnd() + if self.principal_name is not None: + oprot.writeFieldBegin('principal_name', TType.STRING, 2) + oprot.writeString(self.principal_name) + oprot.writeFieldEnd() + if self.principal_type is not None: + oprot.writeFieldBegin('principal_type', TType.I32, 3) + oprot.writeI32(self.principal_type) + oprot.writeFieldEnd() + if self.grantor is not None: + oprot.writeFieldBegin('grantor', TType.STRING, 4) + oprot.writeString(self.grantor) + oprot.writeFieldEnd() + if self.grantorType is not None: + oprot.writeFieldBegin('grantorType', TType.I32, 5) + oprot.writeI32(self.grantorType) + oprot.writeFieldEnd() + if self.grant_option is not None: + oprot.writeFieldBegin('grant_option', TType.BOOL, 6) + oprot.writeBool(self.grant_option) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.role_name) + value = (value * 31) ^ hash(self.principal_name) + value = (value * 31) ^ hash(self.principal_type) + value = (value * 31) ^ hash(self.grantor) + value = (value * 31) ^ hash(self.grantorType) + value = (value * 31) ^ hash(self.grant_option) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class grant_role_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.BOOL, 'success', None, None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + 
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('grant_role_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class revoke_role_args: + """ + Attributes: + - role_name + - principal_name + - principal_type + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'role_name', None, None, ), # 1 + (2, TType.STRING, 'principal_name', None, None, ), # 2 + (3, TType.I32, 'principal_type', None, None, ), # 3 + ) + + def __init__(self, role_name=None, principal_name=None, principal_type=None,): + self.role_name = role_name + self.principal_name = principal_name + self.principal_type = principal_type + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.role_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.principal_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.principal_type = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('revoke_role_args') + if self.role_name is not None: + oprot.writeFieldBegin('role_name', TType.STRING, 1) + oprot.writeString(self.role_name) + oprot.writeFieldEnd() + if self.principal_name is not None: + oprot.writeFieldBegin('principal_name', TType.STRING, 2) + oprot.writeString(self.principal_name) + oprot.writeFieldEnd() + if self.principal_type is not None: + oprot.writeFieldBegin('principal_type', TType.I32, 3) + oprot.writeI32(self.principal_type) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.role_name) + value = (value * 31) ^ hash(self.principal_name) + value = (value * 31) ^ hash(self.principal_type) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, 
self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class revoke_role_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.BOOL, 'success', None, None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('revoke_role_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class list_roles_args: + """ + Attributes: + - principal_name + - principal_type + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'principal_name', None, None, ), # 1 + (2, TType.I32, 'principal_type', None, None, ), # 2 + ) + + def __init__(self, principal_name=None, principal_type=None,): + self.principal_name = principal_name + self.principal_type = principal_type + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.principal_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.principal_type = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + 
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('list_roles_args') + if self.principal_name is not None: + oprot.writeFieldBegin('principal_name', TType.STRING, 1) + oprot.writeString(self.principal_name) + oprot.writeFieldEnd() + if self.principal_type is not None: + oprot.writeFieldBegin('principal_type', TType.I32, 2) + oprot.writeI32(self.principal_type) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.principal_name) + value = (value * 31) ^ hash(self.principal_type) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class list_roles_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRUCT,(Role, Role.thrift_spec)), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1127, _size1124) = iprot.readListBegin() + for _i1128 in xrange(_size1124): + _elem1129 = Role() + _elem1129.read(iprot) + self.success.append(_elem1129) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('list_roles_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1130 in self.success: + iter1130.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class grant_revoke_role_args: """ Attributes: - - dbName - - funcName + - 
request """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'dbName', None, None, ), # 1 - (2, TType.STRING, 'funcName', None, None, ), # 2 + (1, TType.STRUCT, 'request', (GrantRevokeRoleRequest, GrantRevokeRoleRequest.thrift_spec), None, ), # 1 ) - def __init__(self, dbName=None, funcName=None,): - self.dbName = dbName - self.funcName = funcName + def __init__(self, request=None,): + self.request = request def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -31492,13 +33126,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.dbName = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.funcName = iprot.readString() + if ftype == TType.STRUCT: + self.request = GrantRevokeRoleRequest() + self.request.read(iprot) else: iprot.skip(ftype) else: @@ -31510,14 +33140,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_function_args') - if self.dbName is not None: - oprot.writeFieldBegin('dbName', TType.STRING, 1) - oprot.writeString(self.dbName) - oprot.writeFieldEnd() - if self.funcName is not None: - oprot.writeFieldBegin('funcName', TType.STRING, 2) - oprot.writeString(self.funcName) + oprot.writeStructBegin('grant_revoke_role_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -31528,8 +33154,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.dbName) - value = (value * 31) ^ hash(self.funcName) + value = (value * 31) ^ hash(self.request) return value def __repr__(self): @@ -31543,24 +33168,21 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_function_result: +class grant_revoke_role_result: """ Attributes: - success - o1 - - o2 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (Function, Function.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (GrantRevokeRoleResponse, GrantRevokeRoleResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None, o1=None, o2=None,): + def __init__(self, success=None, o1=None,): self.success = success self.o1 = o1 - self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -31573,7 +33195,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = Function() + self.success = GrantRevokeRoleResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -31583,12 +33205,6 @@ def read(self, iprot): self.o1.read(iprot) else: iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRUCT: - self.o2 = NoSuchObjectException() - self.o2.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -31598,7 +33214,7 @@ def write(self, oprot): if oprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_function_result') + oprot.writeStructBegin('grant_revoke_role_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -31607,10 +33223,6 @@ def write(self, oprot): oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 2) - self.o2.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -31622,7 +33234,6 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) - value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -31636,11 +33247,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_all_functions_args: +class get_principals_in_role_args: + """ + Attributes: + - request + """ thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'request', (GetPrincipalsInRoleRequest, GetPrincipalsInRoleRequest.thrift_spec), None, ), # 1 ) + def __init__(self, request=None,): + self.request = request + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -31650,6 +33270,12 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRUCT: + self.request = GetPrincipalsInRoleRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -31659,7 +33285,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_all_functions_args') + oprot.writeStructBegin('get_principals_in_role_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -31669,6 +33299,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.request) return value def __repr__(self): @@ -31682,7 +33313,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_all_functions_result: +class get_principals_in_role_result: """ Attributes: - success @@ -31690,7 +33321,7 @@ class get_all_functions_result: """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GetAllFunctionsResponse, GetAllFunctionsResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (GetPrincipalsInRoleResponse, GetPrincipalsInRoleResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -31709,7 +33340,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = GetAllFunctionsResponse() + self.success = GetPrincipalsInRoleResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -31728,7 +33359,7 @@ def write(self, oprot): if oprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_all_functions_result') + oprot.writeStructBegin('get_principals_in_role_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -31761,19 +33392,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class create_role_args: +class get_role_grants_for_principal_args: """ Attributes: - - role + - request """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'role', (Role, Role.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (GetRoleGrantsForPrincipalRequest, GetRoleGrantsForPrincipalRequest.thrift_spec), None, ), # 1 ) - def __init__(self, role=None,): - self.role = role + def __init__(self, request=None,): + self.request = request def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -31786,8 +33417,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.role = Role() - self.role.read(iprot) + self.request = GetRoleGrantsForPrincipalRequest() + self.request.read(iprot) else: iprot.skip(ftype) else: @@ -31799,10 +33430,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('create_role_args') - if self.role is not None: - oprot.writeFieldBegin('role', TType.STRUCT, 1) - self.role.write(oprot) + oprot.writeStructBegin('get_role_grants_for_principal_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -31813,7 +33444,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.role) + value = (value * 31) ^ hash(self.request) return value def __repr__(self): @@ -31827,7 +33458,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class create_role_result: +class get_role_grants_for_principal_result: """ Attributes: - success @@ -31835,7 +33466,7 @@ class create_role_result: """ thrift_spec = ( - (0, TType.BOOL, 'success', None, None, ), # 0 + (0, TType.STRUCT, 'success', (GetRoleGrantsForPrincipalResponse, GetRoleGrantsForPrincipalResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -31853,8 +33484,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.BOOL: - self.success = iprot.readBool() + if ftype == TType.STRUCT: + self.success = GetRoleGrantsForPrincipalResponse() + self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: @@ -31872,10 +33504,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('create_role_result') + oprot.writeStructBegin('get_role_grants_for_principal_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.BOOL, 0) - 
oprot.writeBool(self.success) + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -31905,19 +33537,25 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class drop_role_args: +class get_privilege_set_args: """ Attributes: - - role_name + - hiveObject + - user_name + - group_names """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'role_name', None, None, ), # 1 + (1, TType.STRUCT, 'hiveObject', (HiveObjectRef, HiveObjectRef.thrift_spec), None, ), # 1 + (2, TType.STRING, 'user_name', None, None, ), # 2 + (3, TType.LIST, 'group_names', (TType.STRING,None), None, ), # 3 ) - def __init__(self, role_name=None,): - self.role_name = role_name + def __init__(self, hiveObject=None, user_name=None, group_names=None,): + self.hiveObject = hiveObject + self.user_name = user_name + self.group_names = group_names def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -31929,8 +33567,24 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: + if ftype == TType.STRUCT: + self.hiveObject = HiveObjectRef() + self.hiveObject.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: if ftype == TType.STRING: - self.role_name = iprot.readString() + self.user_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.group_names = [] + (_etype1134, _size1131) = iprot.readListBegin() + for _i1135 in xrange(_size1131): + _elem1136 = iprot.readString() + self.group_names.append(_elem1136) + iprot.readListEnd() else: iprot.skip(ftype) else: @@ -31942,10 +33596,21 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('drop_role_args') - if self.role_name is not None: - oprot.writeFieldBegin('role_name', TType.STRING, 1) - oprot.writeString(self.role_name) + oprot.writeStructBegin('get_privilege_set_args') + if self.hiveObject is not None: + oprot.writeFieldBegin('hiveObject', TType.STRUCT, 1) + self.hiveObject.write(oprot) + oprot.writeFieldEnd() + if self.user_name is not None: + oprot.writeFieldBegin('user_name', TType.STRING, 2) + oprot.writeString(self.user_name) + oprot.writeFieldEnd() + if self.group_names is not None: + oprot.writeFieldBegin('group_names', TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.group_names)) + for iter1137 in self.group_names: + oprot.writeString(iter1137) + oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -31956,7 +33621,9 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.role_name) + value = (value * 31) ^ hash(self.hiveObject) + value = (value * 31) ^ hash(self.user_name) + value = (value * 31) ^ hash(self.group_names) return value def __repr__(self): @@ -31970,7 +33637,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class drop_role_result: +class get_privilege_set_result: """ Attributes: - success @@ -31978,7 +33645,7 @@ class drop_role_result: """ thrift_spec = ( - (0, TType.BOOL, 'success', None, None, ), # 0 + (0, TType.STRUCT, 'success', (PrincipalPrivilegeSet, 
PrincipalPrivilegeSet.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -31996,8 +33663,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.BOOL: - self.success = iprot.readBool() + if ftype == TType.STRUCT: + self.success = PrincipalPrivilegeSet() + self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: @@ -32015,10 +33683,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('drop_role_result') + oprot.writeStructBegin('get_privilege_set_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.BOOL, 0) - oprot.writeBool(self.success) + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -32048,11 +33716,26 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_role_names_args: +class list_privileges_args: + """ + Attributes: + - principal_name + - principal_type + - hiveObject + """ thrift_spec = ( + None, # 0 + (1, TType.STRING, 'principal_name', None, None, ), # 1 + (2, TType.I32, 'principal_type', None, None, ), # 2 + (3, TType.STRUCT, 'hiveObject', (HiveObjectRef, HiveObjectRef.thrift_spec), None, ), # 3 ) + def __init__(self, principal_name=None, principal_type=None, hiveObject=None,): + self.principal_name = principal_name + self.principal_type = principal_type + self.hiveObject = hiveObject + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -32062,6 +33745,22 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRING: + self.principal_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.principal_type = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.hiveObject = HiveObjectRef() + self.hiveObject.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -32071,7 +33770,19 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_role_names_args') + oprot.writeStructBegin('list_privileges_args') + if self.principal_name is not None: + oprot.writeFieldBegin('principal_name', TType.STRING, 1) + oprot.writeString(self.principal_name) + oprot.writeFieldEnd() + if self.principal_type is not None: + oprot.writeFieldBegin('principal_type', TType.I32, 2) + oprot.writeI32(self.principal_type) + oprot.writeFieldEnd() + if self.hiveObject is not None: + oprot.writeFieldBegin('hiveObject', TType.STRUCT, 3) + self.hiveObject.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -32081,6 +33792,9 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.principal_name) + 
value = (value * 31) ^ hash(self.principal_type) + value = (value * 31) ^ hash(self.hiveObject) return value def __repr__(self): @@ -32094,7 +33808,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_role_names_result: +class list_privileges_result: """ Attributes: - success @@ -32102,7 +33816,7 @@ class get_role_names_result: """ thrift_spec = ( - (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (0, TType.LIST, 'success', (TType.STRUCT,(HiveObjectPrivilege, HiveObjectPrivilege.thrift_spec)), None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -32122,10 +33836,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1120, _size1117) = iprot.readListBegin() - for _i1121 in xrange(_size1117): - _elem1122 = iprot.readString() - self.success.append(_elem1122) + (_etype1141, _size1138) = iprot.readListBegin() + for _i1142 in xrange(_size1138): + _elem1143 = HiveObjectPrivilege() + _elem1143.read(iprot) + self.success.append(_elem1143) iprot.readListEnd() else: iprot.skip(ftype) @@ -32144,12 +33859,12 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_role_names_result') + oprot.writeStructBegin('list_privileges_result') if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) - oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1123 in self.success: - oprot.writeString(iter1123) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1144 in self.success: + iter1144.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -32180,34 +33895,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class grant_role_args: +class grant_privileges_args: """ Attributes: - - role_name - - principal_name - - principal_type - - grantor - - grantorType - - grant_option + - privileges """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'role_name', None, None, ), # 1 - (2, TType.STRING, 'principal_name', None, None, ), # 2 - (3, TType.I32, 'principal_type', None, None, ), # 3 - (4, TType.STRING, 'grantor', None, None, ), # 4 - (5, TType.I32, 'grantorType', None, None, ), # 5 - (6, TType.BOOL, 'grant_option', None, None, ), # 6 + (1, TType.STRUCT, 'privileges', (PrivilegeBag, PrivilegeBag.thrift_spec), None, ), # 1 ) - def __init__(self, role_name=None, principal_name=None, principal_type=None, grantor=None, grantorType=None, grant_option=None,): - self.role_name = role_name - self.principal_name = principal_name - self.principal_type = principal_type - self.grantor = grantor - self.grantorType = grantorType - self.grant_option = grant_option + def __init__(self, privileges=None,): + self.privileges = privileges def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -32219,33 +33919,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.role_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.principal_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 3: - if ftype == TType.I32: - 
self.principal_type = iprot.readI32() - else: - iprot.skip(ftype) - elif fid == 4: - if ftype == TType.STRING: - self.grantor = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 5: - if ftype == TType.I32: - self.grantorType = iprot.readI32() - else: - iprot.skip(ftype) - elif fid == 6: - if ftype == TType.BOOL: - self.grant_option = iprot.readBool() + if ftype == TType.STRUCT: + self.privileges = PrivilegeBag() + self.privileges.read(iprot) else: iprot.skip(ftype) else: @@ -32257,30 +33933,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_role_args') - if self.role_name is not None: - oprot.writeFieldBegin('role_name', TType.STRING, 1) - oprot.writeString(self.role_name) - oprot.writeFieldEnd() - if self.principal_name is not None: - oprot.writeFieldBegin('principal_name', TType.STRING, 2) - oprot.writeString(self.principal_name) - oprot.writeFieldEnd() - if self.principal_type is not None: - oprot.writeFieldBegin('principal_type', TType.I32, 3) - oprot.writeI32(self.principal_type) - oprot.writeFieldEnd() - if self.grantor is not None: - oprot.writeFieldBegin('grantor', TType.STRING, 4) - oprot.writeString(self.grantor) - oprot.writeFieldEnd() - if self.grantorType is not None: - oprot.writeFieldBegin('grantorType', TType.I32, 5) - oprot.writeI32(self.grantorType) - oprot.writeFieldEnd() - if self.grant_option is not None: - oprot.writeFieldBegin('grant_option', TType.BOOL, 6) - oprot.writeBool(self.grant_option) + oprot.writeStructBegin('grant_privileges_args') + if self.privileges is not None: + oprot.writeFieldBegin('privileges', TType.STRUCT, 1) + self.privileges.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -32291,12 +33947,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.role_name) - value = (value * 31) ^ hash(self.principal_name) - value = (value * 31) ^ hash(self.principal_type) - value = (value * 31) ^ hash(self.grantor) - value = (value * 31) ^ hash(self.grantorType) - value = (value * 31) ^ hash(self.grant_option) + value = (value * 31) ^ hash(self.privileges) return value def __repr__(self): @@ -32310,7 +33961,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class grant_role_result: +class grant_privileges_result: """ Attributes: - success @@ -32355,7 +34006,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_role_result') + oprot.writeStructBegin('grant_privileges_result') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 0) oprot.writeBool(self.success) @@ -32388,25 +34039,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class revoke_role_args: +class revoke_privileges_args: """ Attributes: - - role_name - - principal_name - - principal_type + - privileges """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'role_name', None, None, ), # 1 - (2, TType.STRING, 'principal_name', None, None, ), # 2 - (3, TType.I32, 'principal_type', None, None, ), # 3 + (1, TType.STRUCT, 'privileges', (PrivilegeBag, PrivilegeBag.thrift_spec), None, ), # 1 ) - def 
__init__(self, role_name=None, principal_name=None, principal_type=None,): - self.role_name = role_name - self.principal_name = principal_name - self.principal_type = principal_type + def __init__(self, privileges=None,): + self.privileges = privileges def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -32418,18 +34063,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.role_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.principal_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 3: - if ftype == TType.I32: - self.principal_type = iprot.readI32() + if ftype == TType.STRUCT: + self.privileges = PrivilegeBag() + self.privileges.read(iprot) else: iprot.skip(ftype) else: @@ -32441,18 +34077,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('revoke_role_args') - if self.role_name is not None: - oprot.writeFieldBegin('role_name', TType.STRING, 1) - oprot.writeString(self.role_name) - oprot.writeFieldEnd() - if self.principal_name is not None: - oprot.writeFieldBegin('principal_name', TType.STRING, 2) - oprot.writeString(self.principal_name) - oprot.writeFieldEnd() - if self.principal_type is not None: - oprot.writeFieldBegin('principal_type', TType.I32, 3) - oprot.writeI32(self.principal_type) + oprot.writeStructBegin('revoke_privileges_args') + if self.privileges is not None: + oprot.writeFieldBegin('privileges', TType.STRUCT, 1) + self.privileges.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -32463,9 +34091,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.role_name) - value = (value * 31) ^ hash(self.principal_name) - value = (value * 31) ^ hash(self.principal_type) + value = (value * 31) ^ hash(self.privileges) return value def __repr__(self): @@ -32479,7 +34105,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class revoke_role_result: +class revoke_privileges_result: """ Attributes: - success @@ -32524,7 +34150,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('revoke_role_result') + oprot.writeStructBegin('revoke_privileges_result') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 0) oprot.writeBool(self.success) @@ -32557,22 +34183,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class list_roles_args: +class grant_revoke_privileges_args: """ Attributes: - - principal_name - - principal_type + - request """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'principal_name', None, None, ), # 1 - (2, TType.I32, 'principal_type', None, None, ), # 2 + (1, TType.STRUCT, 'request', (GrantRevokePrivilegeRequest, GrantRevokePrivilegeRequest.thrift_spec), None, ), # 1 ) - def __init__(self, principal_name=None, principal_type=None,): - self.principal_name = principal_name - self.principal_type = 
principal_type + def __init__(self, request=None,): + self.request = request def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -32584,13 +34207,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.principal_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.I32: - self.principal_type = iprot.readI32() + if ftype == TType.STRUCT: + self.request = GrantRevokePrivilegeRequest() + self.request.read(iprot) else: iprot.skip(ftype) else: @@ -32602,14 +34221,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('list_roles_args') - if self.principal_name is not None: - oprot.writeFieldBegin('principal_name', TType.STRING, 1) - oprot.writeString(self.principal_name) - oprot.writeFieldEnd() - if self.principal_type is not None: - oprot.writeFieldBegin('principal_type', TType.I32, 2) - oprot.writeI32(self.principal_type) + oprot.writeStructBegin('grant_revoke_privileges_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -32620,8 +34235,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.principal_name) - value = (value * 31) ^ hash(self.principal_type) + value = (value * 31) ^ hash(self.request) return value def __repr__(self): @@ -32635,7 +34249,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class list_roles_result: +class grant_revoke_privileges_result: """ Attributes: - success @@ -32643,7 +34257,7 @@ class list_roles_result: """ thrift_spec = ( - (0, TType.LIST, 'success', (TType.STRUCT,(Role, Role.thrift_spec)), None, ), # 0 + (0, TType.STRUCT, 'success', (GrantRevokePrivilegeResponse, GrantRevokePrivilegeResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -32661,14 +34275,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.LIST: - self.success = [] - (_etype1127, _size1124) = iprot.readListBegin() - for _i1128 in xrange(_size1124): - _elem1129 = Role() - _elem1129.read(iprot) - self.success.append(_elem1129) - iprot.readListEnd() + if ftype == TType.STRUCT: + self.success = GrantRevokePrivilegeResponse() + self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: @@ -32686,13 +34295,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('list_roles_result') + oprot.writeStructBegin('grant_revoke_privileges_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.LIST, 0) - oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1130 in self.success: - iter1130.write(oprot) - oprot.writeListEnd() + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) 
@@ -32722,19 +34328,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class grant_revoke_role_args: +class set_ugi_args: """ Attributes: - - request + - user_name + - group_names """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (GrantRevokeRoleRequest, GrantRevokeRoleRequest.thrift_spec), None, ), # 1 + (1, TType.STRING, 'user_name', None, None, ), # 1 + (2, TType.LIST, 'group_names', (TType.STRING,None), None, ), # 2 ) - def __init__(self, request=None,): - self.request = request + def __init__(self, user_name=None, group_names=None,): + self.user_name = user_name + self.group_names = group_names def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -32746,9 +34355,18 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.request = GrantRevokeRoleRequest() - self.request.read(iprot) + if ftype == TType.STRING: + self.user_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.group_names = [] + (_etype1148, _size1145) = iprot.readListBegin() + for _i1149 in xrange(_size1145): + _elem1150 = iprot.readString() + self.group_names.append(_elem1150) + iprot.readListEnd() else: iprot.skip(ftype) else: @@ -32760,10 +34378,17 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_revoke_role_args') - if self.request is not None: - oprot.writeFieldBegin('request', TType.STRUCT, 1) - self.request.write(oprot) + oprot.writeStructBegin('set_ugi_args') + if self.user_name is not None: + oprot.writeFieldBegin('user_name', TType.STRING, 1) + oprot.writeString(self.user_name) + oprot.writeFieldEnd() + if self.group_names is not None: + oprot.writeFieldBegin('group_names', TType.LIST, 2) + oprot.writeListBegin(TType.STRING, len(self.group_names)) + for iter1151 in self.group_names: + oprot.writeString(iter1151) + oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -32774,7 +34399,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.request) + value = (value * 31) ^ hash(self.user_name) + value = (value * 31) ^ hash(self.group_names) return value def __repr__(self): @@ -32788,7 +34414,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class grant_revoke_role_result: +class set_ugi_result: """ Attributes: - success @@ -32796,7 +34422,7 @@ class grant_revoke_role_result: """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GrantRevokeRoleResponse, GrantRevokeRoleResponse.thrift_spec), None, ), # 0 + (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -32814,9 +34440,13 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRUCT: - self.success = GrantRevokeRoleResponse() - self.success.read(iprot) + if ftype == TType.LIST: + self.success = [] + (_etype1155, _size1152) = iprot.readListBegin() + for _i1156 in xrange(_size1152): + _elem1157 = iprot.readString() + self.success.append(_elem1157) + iprot.readListEnd() else: iprot.skip(ftype) elif fid == 1: @@ 
-32834,10 +34464,13 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_revoke_role_result') + oprot.writeStructBegin('set_ugi_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1158 in self.success: + oprot.writeString(iter1158) + oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -32867,19 +34500,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_principals_in_role_args: +class get_delegation_token_args: """ Attributes: - - request + - token_owner + - renewer_kerberos_principal_name """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (GetPrincipalsInRoleRequest, GetPrincipalsInRoleRequest.thrift_spec), None, ), # 1 + (1, TType.STRING, 'token_owner', None, None, ), # 1 + (2, TType.STRING, 'renewer_kerberos_principal_name', None, None, ), # 2 ) - def __init__(self, request=None,): - self.request = request + def __init__(self, token_owner=None, renewer_kerberos_principal_name=None,): + self.token_owner = token_owner + self.renewer_kerberos_principal_name = renewer_kerberos_principal_name def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -32891,9 +34527,13 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.request = GetPrincipalsInRoleRequest() - self.request.read(iprot) + if ftype == TType.STRING: + self.token_owner = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.renewer_kerberos_principal_name = iprot.readString() else: iprot.skip(ftype) else: @@ -32905,10 +34545,14 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_principals_in_role_args') - if self.request is not None: - oprot.writeFieldBegin('request', TType.STRUCT, 1) - self.request.write(oprot) + oprot.writeStructBegin('get_delegation_token_args') + if self.token_owner is not None: + oprot.writeFieldBegin('token_owner', TType.STRING, 1) + oprot.writeString(self.token_owner) + oprot.writeFieldEnd() + if self.renewer_kerberos_principal_name is not None: + oprot.writeFieldBegin('renewer_kerberos_principal_name', TType.STRING, 2) + oprot.writeString(self.renewer_kerberos_principal_name) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -32919,7 +34563,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.request) + value = (value * 31) ^ hash(self.token_owner) + value = (value * 31) ^ hash(self.renewer_kerberos_principal_name) return value def __repr__(self): @@ -32933,7 +34578,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_principals_in_role_result: +class get_delegation_token_result: """ Attributes: - success @@ -32941,7 +34586,7 @@ 
class get_principals_in_role_result: """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GetPrincipalsInRoleResponse, GetPrincipalsInRoleResponse.thrift_spec), None, ), # 0 + (0, TType.STRING, 'success', None, None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -32959,9 +34604,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRUCT: - self.success = GetPrincipalsInRoleResponse() - self.success.read(iprot) + if ftype == TType.STRING: + self.success = iprot.readString() else: iprot.skip(ftype) elif fid == 1: @@ -32979,10 +34623,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_principals_in_role_result') + oprot.writeStructBegin('get_delegation_token_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeFieldBegin('success', TType.STRING, 0) + oprot.writeString(self.success) oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -33012,19 +34656,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_role_grants_for_principal_args: +class renew_delegation_token_args: """ Attributes: - - request + - token_str_form """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (GetRoleGrantsForPrincipalRequest, GetRoleGrantsForPrincipalRequest.thrift_spec), None, ), # 1 + (1, TType.STRING, 'token_str_form', None, None, ), # 1 ) - def __init__(self, request=None,): - self.request = request + def __init__(self, token_str_form=None,): + self.token_str_form = token_str_form def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33036,9 +34680,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.request = GetRoleGrantsForPrincipalRequest() - self.request.read(iprot) + if ftype == TType.STRING: + self.token_str_form = iprot.readString() else: iprot.skip(ftype) else: @@ -33050,10 +34693,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_role_grants_for_principal_args') - if self.request is not None: - oprot.writeFieldBegin('request', TType.STRUCT, 1) - self.request.write(oprot) + oprot.writeStructBegin('renew_delegation_token_args') + if self.token_str_form is not None: + oprot.writeFieldBegin('token_str_form', TType.STRING, 1) + oprot.writeString(self.token_str_form) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33064,7 +34707,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.request) + value = (value * 31) ^ hash(self.token_str_form) return value def __repr__(self): @@ -33078,7 +34721,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_role_grants_for_principal_result: +class renew_delegation_token_result: """ Attributes: - success @@ -33086,7 +34729,7 @@ class get_role_grants_for_principal_result: """ 
thrift_spec = ( - (0, TType.STRUCT, 'success', (GetRoleGrantsForPrincipalResponse, GetRoleGrantsForPrincipalResponse.thrift_spec), None, ), # 0 + (0, TType.I64, 'success', None, None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -33104,9 +34747,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRUCT: - self.success = GetRoleGrantsForPrincipalResponse() - self.success.read(iprot) + if ftype == TType.I64: + self.success = iprot.readI64() else: iprot.skip(ftype) elif fid == 1: @@ -33124,10 +34766,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_role_grants_for_principal_result') + oprot.writeStructBegin('renew_delegation_token_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeFieldBegin('success', TType.I64, 0) + oprot.writeI64(self.success) oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -33157,25 +34799,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_privilege_set_args: +class cancel_delegation_token_args: """ Attributes: - - hiveObject - - user_name - - group_names + - token_str_form """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'hiveObject', (HiveObjectRef, HiveObjectRef.thrift_spec), None, ), # 1 - (2, TType.STRING, 'user_name', None, None, ), # 2 - (3, TType.LIST, 'group_names', (TType.STRING,None), None, ), # 3 + (1, TType.STRING, 'token_str_form', None, None, ), # 1 ) - def __init__(self, hiveObject=None, user_name=None, group_names=None,): - self.hiveObject = hiveObject - self.user_name = user_name - self.group_names = group_names + def __init__(self, token_str_form=None,): + self.token_str_form = token_str_form def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33187,24 +34823,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.hiveObject = HiveObjectRef() - self.hiveObject.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: if ftype == TType.STRING: - self.user_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 3: - if ftype == TType.LIST: - self.group_names = [] - (_etype1134, _size1131) = iprot.readListBegin() - for _i1135 in xrange(_size1131): - _elem1136 = iprot.readString() - self.group_names.append(_elem1136) - iprot.readListEnd() + self.token_str_form = iprot.readString() else: iprot.skip(ftype) else: @@ -33216,21 +34836,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_privilege_set_args') - if self.hiveObject is not None: - oprot.writeFieldBegin('hiveObject', TType.STRUCT, 1) - self.hiveObject.write(oprot) - oprot.writeFieldEnd() - if self.user_name is not None: - oprot.writeFieldBegin('user_name', TType.STRING, 2) - oprot.writeString(self.user_name) - oprot.writeFieldEnd() - if self.group_names is not None: - 
oprot.writeFieldBegin('group_names', TType.LIST, 3) - oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1137 in self.group_names: - oprot.writeString(iter1137) - oprot.writeListEnd() + oprot.writeStructBegin('cancel_delegation_token_args') + if self.token_str_form is not None: + oprot.writeFieldBegin('token_str_form', TType.STRING, 1) + oprot.writeString(self.token_str_form) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33241,9 +34850,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.hiveObject) - value = (value * 31) ^ hash(self.user_name) - value = (value * 31) ^ hash(self.group_names) + value = (value * 31) ^ hash(self.token_str_form) return value def __repr__(self): @@ -33257,20 +34864,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_privilege_set_result: +class cancel_delegation_token_result: """ Attributes: - - success - o1 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 0 + None, # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None, o1=None,): - self.success = success + def __init__(self, o1=None,): self.o1 = o1 def read(self, iprot): @@ -33282,13 +34887,7 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.STRUCT: - self.success = PrincipalPrivilegeSet() - self.success.read(iprot) - else: - iprot.skip(ftype) - elif fid == 1: + if fid == 1: if ftype == TType.STRUCT: self.o1 = MetaException() self.o1.read(iprot) @@ -33303,11 +34902,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_privilege_set_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('cancel_delegation_token_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -33321,7 +34916,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) return value @@ -33336,25 +34930,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class list_privileges_args: +class add_token_args: """ Attributes: - - principal_name - - principal_type - - hiveObject + - token_identifier + - delegation_token """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'principal_name', None, None, ), # 1 - (2, TType.I32, 'principal_type', None, None, ), # 2 - (3, TType.STRUCT, 'hiveObject', (HiveObjectRef, HiveObjectRef.thrift_spec), None, ), # 3 + (1, TType.STRING, 'token_identifier', None, None, ), # 1 + (2, TType.STRING, 'delegation_token', None, None, ), # 2 ) - def __init__(self, principal_name=None, principal_type=None, hiveObject=None,): - self.principal_name = principal_name - self.principal_type = principal_type - self.hiveObject = hiveObject + def __init__(self, token_identifier=None, delegation_token=None,): + self.token_identifier = token_identifier + self.delegation_token = delegation_token def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, 
TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33367,18 +34958,12 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.principal_name = iprot.readString() + self.token_identifier = iprot.readString() else: iprot.skip(ftype) elif fid == 2: - if ftype == TType.I32: - self.principal_type = iprot.readI32() - else: - iprot.skip(ftype) - elif fid == 3: - if ftype == TType.STRUCT: - self.hiveObject = HiveObjectRef() - self.hiveObject.read(iprot) + if ftype == TType.STRING: + self.delegation_token = iprot.readString() else: iprot.skip(ftype) else: @@ -33390,18 +34975,14 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('list_privileges_args') - if self.principal_name is not None: - oprot.writeFieldBegin('principal_name', TType.STRING, 1) - oprot.writeString(self.principal_name) - oprot.writeFieldEnd() - if self.principal_type is not None: - oprot.writeFieldBegin('principal_type', TType.I32, 2) - oprot.writeI32(self.principal_type) + oprot.writeStructBegin('add_token_args') + if self.token_identifier is not None: + oprot.writeFieldBegin('token_identifier', TType.STRING, 1) + oprot.writeString(self.token_identifier) oprot.writeFieldEnd() - if self.hiveObject is not None: - oprot.writeFieldBegin('hiveObject', TType.STRUCT, 3) - self.hiveObject.write(oprot) + if self.delegation_token is not None: + oprot.writeFieldBegin('delegation_token', TType.STRING, 2) + oprot.writeString(self.delegation_token) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33412,9 +34993,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.principal_name) - value = (value * 31) ^ hash(self.principal_type) - value = (value * 31) ^ hash(self.hiveObject) + value = (value * 31) ^ hash(self.token_identifier) + value = (value * 31) ^ hash(self.delegation_token) return value def __repr__(self): @@ -33428,21 +35008,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class list_privileges_result: +class add_token_result: """ Attributes: - success - - o1 """ thrift_spec = ( - (0, TType.LIST, 'success', (TType.STRUCT,(HiveObjectPrivilege, HiveObjectPrivilege.thrift_spec)), None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (0, TType.BOOL, 'success', None, None, ), # 0 ) - def __init__(self, success=None, o1=None,): + def __init__(self, success=None,): self.success = success - self.o1 = o1 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33454,20 +35031,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.LIST: - self.success = [] - (_etype1141, _size1138) = iprot.readListBegin() - for _i1142 in xrange(_size1138): - _elem1143 = HiveObjectPrivilege() - _elem1143.read(iprot) - self.success.append(_elem1143) - iprot.readListEnd() - else: - iprot.skip(ftype) - elif fid == 1: - if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) + if ftype == TType.BOOL: + self.success = iprot.readBool() else: iprot.skip(ftype) else: @@ -33479,17 +35044,10 @@ def write(self, oprot): if oprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('list_privileges_result') + oprot.writeStructBegin('add_token_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.LIST, 0) - oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1144 in self.success: - iter1144.write(oprot) - oprot.writeListEnd() - oprot.writeFieldEnd() - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33501,7 +35059,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) - value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -33515,19 +35072,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class grant_privileges_args: +class remove_token_args: """ Attributes: - - privileges + - token_identifier """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'privileges', (PrivilegeBag, PrivilegeBag.thrift_spec), None, ), # 1 + (1, TType.STRING, 'token_identifier', None, None, ), # 1 ) - def __init__(self, privileges=None,): - self.privileges = privileges + def __init__(self, token_identifier=None,): + self.token_identifier = token_identifier def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33539,9 +35096,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.privileges = PrivilegeBag() - self.privileges.read(iprot) + if ftype == TType.STRING: + self.token_identifier = iprot.readString() else: iprot.skip(ftype) else: @@ -33553,10 +35109,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_privileges_args') - if self.privileges is not None: - oprot.writeFieldBegin('privileges', TType.STRUCT, 1) - self.privileges.write(oprot) + oprot.writeStructBegin('remove_token_args') + if self.token_identifier is not None: + oprot.writeFieldBegin('token_identifier', TType.STRING, 1) + oprot.writeString(self.token_identifier) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33567,7 +35123,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.privileges) + value = (value * 31) ^ hash(self.token_identifier) return value def __repr__(self): @@ -33581,21 +35137,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class grant_privileges_result: +class remove_token_result: """ Attributes: - success - - o1 """ thrift_spec = ( (0, TType.BOOL, 'success', None, None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None, o1=None,): + def __init__(self, success=None,): self.success = success - self.o1 = o1 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is 
not None and fastbinary is not None: @@ -33611,12 +35164,6 @@ def read(self, iprot): self.success = iprot.readBool() else: iprot.skip(ftype) - elif fid == 1: - if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -33626,15 +35173,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_privileges_result') + oprot.writeStructBegin('remove_token_result') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 0) oprot.writeBool(self.success) oprot.writeFieldEnd() - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33645,7 +35188,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) - value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -33659,19 +35201,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class revoke_privileges_args: +class get_token_args: """ Attributes: - - privileges + - token_identifier """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'privileges', (PrivilegeBag, PrivilegeBag.thrift_spec), None, ), # 1 + (1, TType.STRING, 'token_identifier', None, None, ), # 1 ) - def __init__(self, privileges=None,): - self.privileges = privileges + def __init__(self, token_identifier=None,): + self.token_identifier = token_identifier def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33683,9 +35225,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.privileges = PrivilegeBag() - self.privileges.read(iprot) + if ftype == TType.STRING: + self.token_identifier = iprot.readString() else: iprot.skip(ftype) else: @@ -33697,10 +35238,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('revoke_privileges_args') - if self.privileges is not None: - oprot.writeFieldBegin('privileges', TType.STRUCT, 1) - self.privileges.write(oprot) + oprot.writeStructBegin('get_token_args') + if self.token_identifier is not None: + oprot.writeFieldBegin('token_identifier', TType.STRING, 1) + oprot.writeString(self.token_identifier) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33711,7 +35252,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.privileges) + value = (value * 31) ^ hash(self.token_identifier) return value def __repr__(self): @@ -33725,21 +35266,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class revoke_privileges_result: +class get_token_result: """ Attributes: - success - - o1 """ thrift_spec = ( - (0, TType.BOOL, 'success', None, None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (0, TType.STRING, 'success', None, None, ), # 0 ) - def __init__(self, success=None, o1=None,): + 
def __init__(self, success=None,): self.success = success - self.o1 = o1 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33751,14 +35289,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.BOOL: - self.success = iprot.readBool() - else: - iprot.skip(ftype) - elif fid == 1: - if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) + if ftype == TType.STRING: + self.success = iprot.readString() else: iprot.skip(ftype) else: @@ -33770,14 +35302,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('revoke_privileges_result') + oprot.writeStructBegin('get_token_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.BOOL, 0) - oprot.writeBool(self.success) - oprot.writeFieldEnd() - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) + oprot.writeFieldBegin('success', TType.STRING, 0) + oprot.writeString(self.success) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33789,7 +35317,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) - value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -33803,20 +35330,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class grant_revoke_privileges_args: - """ - Attributes: - - request - """ +class get_all_token_identifiers_args: thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'request', (GrantRevokePrivilegeRequest, GrantRevokePrivilegeRequest.thrift_spec), None, ), # 1 ) - def __init__(self, request=None,): - self.request = request - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -33826,12 +35344,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.request = GrantRevokePrivilegeRequest() - self.request.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -33841,11 +35353,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_revoke_privileges_args') - if self.request is not None: - oprot.writeFieldBegin('request', TType.STRUCT, 1) - self.request.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('get_all_token_identifiers_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -33855,7 +35363,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.request) return value def __repr__(self): @@ -33869,21 +35376,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class grant_revoke_privileges_result: +class get_all_token_identifiers_result: """ Attributes: - success - - o1 """ 
thrift_spec = ( - (0, TType.STRUCT, 'success', (GrantRevokePrivilegeResponse, GrantRevokePrivilegeResponse.thrift_spec), None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 ) - def __init__(self, success=None, o1=None,): + def __init__(self, success=None,): self.success = success - self.o1 = o1 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33895,15 +35399,13 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRUCT: - self.success = GrantRevokePrivilegeResponse() - self.success.read(iprot) - else: - iprot.skip(ftype) - elif fid == 1: - if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) + if ftype == TType.LIST: + self.success = [] + (_etype1162, _size1159) = iprot.readListBegin() + for _i1163 in xrange(_size1159): + _elem1164 = iprot.readString() + self.success.append(_elem1164) + iprot.readListEnd() else: iprot.skip(ftype) else: @@ -33915,14 +35417,13 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_revoke_privileges_result') + oprot.writeStructBegin('get_all_token_identifiers_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) - oprot.writeFieldEnd() - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1165 in self.success: + oprot.writeString(iter1165) + oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33934,7 +35435,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) - value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -33948,22 +35448,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class set_ugi_args: +class add_master_key_args: """ Attributes: - - user_name - - group_names + - key """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'user_name', None, None, ), # 1 - (2, TType.LIST, 'group_names', (TType.STRING,None), None, ), # 2 + (1, TType.STRING, 'key', None, None, ), # 1 ) - def __init__(self, user_name=None, group_names=None,): - self.user_name = user_name - self.group_names = group_names + def __init__(self, key=None,): + self.key = key def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33976,17 +35473,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.user_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.LIST: - self.group_names = [] - (_etype1148, _size1145) = iprot.readListBegin() - for _i1149 in xrange(_size1145): - _elem1150 = iprot.readString() - self.group_names.append(_elem1150) - iprot.readListEnd() + self.key = iprot.readString() else: iprot.skip(ftype) else: @@ -33998,17 +35485,10 @@ def write(self, oprot): if 
oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('set_ugi_args') - if self.user_name is not None: - oprot.writeFieldBegin('user_name', TType.STRING, 1) - oprot.writeString(self.user_name) - oprot.writeFieldEnd() - if self.group_names is not None: - oprot.writeFieldBegin('group_names', TType.LIST, 2) - oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1151 in self.group_names: - oprot.writeString(iter1151) - oprot.writeListEnd() + oprot.writeStructBegin('add_master_key_args') + if self.key is not None: + oprot.writeFieldBegin('key', TType.STRING, 1) + oprot.writeString(self.key) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34019,8 +35499,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.user_name) - value = (value * 31) ^ hash(self.group_names) + value = (value * 31) ^ hash(self.key) return value def __repr__(self): @@ -34034,7 +35513,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class set_ugi_result: +class add_master_key_result: """ Attributes: - success @@ -34042,7 +35521,7 @@ class set_ugi_result: """ thrift_spec = ( - (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (0, TType.I32, 'success', None, None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -34060,13 +35539,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.LIST: - self.success = [] - (_etype1155, _size1152) = iprot.readListBegin() - for _i1156 in xrange(_size1152): - _elem1157 = iprot.readString() - self.success.append(_elem1157) - iprot.readListEnd() + if ftype == TType.I32: + self.success = iprot.readI32() else: iprot.skip(ftype) elif fid == 1: @@ -34084,13 +35558,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('set_ugi_result') + oprot.writeStructBegin('add_master_key_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.LIST, 0) - oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1158 in self.success: - oprot.writeString(iter1158) - oprot.writeListEnd() + oprot.writeFieldBegin('success', TType.I32, 0) + oprot.writeI32(self.success) oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -34120,22 +35591,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_delegation_token_args: +class update_master_key_args: """ Attributes: - - token_owner - - renewer_kerberos_principal_name + - seq_number + - key """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'token_owner', None, None, ), # 1 - (2, TType.STRING, 'renewer_kerberos_principal_name', None, None, ), # 2 + (1, TType.I32, 'seq_number', None, None, ), # 1 + (2, TType.STRING, 'key', None, None, ), # 2 ) - def __init__(self, token_owner=None, renewer_kerberos_principal_name=None,): - self.token_owner = token_owner - self.renewer_kerberos_principal_name = renewer_kerberos_principal_name + def __init__(self, seq_number=None, key=None,): + self.seq_number = seq_number + self.key = key def read(self, iprot): if iprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -34147,13 +35618,13 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.token_owner = iprot.readString() + if ftype == TType.I32: + self.seq_number = iprot.readI32() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: - self.renewer_kerberos_principal_name = iprot.readString() + self.key = iprot.readString() else: iprot.skip(ftype) else: @@ -34165,14 +35636,14 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_delegation_token_args') - if self.token_owner is not None: - oprot.writeFieldBegin('token_owner', TType.STRING, 1) - oprot.writeString(self.token_owner) + oprot.writeStructBegin('update_master_key_args') + if self.seq_number is not None: + oprot.writeFieldBegin('seq_number', TType.I32, 1) + oprot.writeI32(self.seq_number) oprot.writeFieldEnd() - if self.renewer_kerberos_principal_name is not None: - oprot.writeFieldBegin('renewer_kerberos_principal_name', TType.STRING, 2) - oprot.writeString(self.renewer_kerberos_principal_name) + if self.key is not None: + oprot.writeFieldBegin('key', TType.STRING, 2) + oprot.writeString(self.key) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34183,8 +35654,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.token_owner) - value = (value * 31) ^ hash(self.renewer_kerberos_principal_name) + value = (value * 31) ^ hash(self.seq_number) + value = (value * 31) ^ hash(self.key) return value def __repr__(self): @@ -34198,21 +35669,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_delegation_token_result: +class update_master_key_result: """ Attributes: - - success - o1 + - o2 """ thrift_spec = ( - (0, TType.STRING, 'success', None, None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None, o1=None,): - self.success = success + def __init__(self, o1=None, o2=None,): self.o1 = o1 + self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -34223,15 +35695,16 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.STRING: - self.success = iprot.readString() + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) else: iprot.skip(ftype) - elif fid == 1: + elif fid == 2: if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) + self.o2 = MetaException() + self.o2.read(iprot) else: iprot.skip(ftype) else: @@ -34243,15 +35716,15 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, 
(self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_delegation_token_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRING, 0) - oprot.writeString(self.success) - oprot.writeFieldEnd() + oprot.writeStructBegin('update_master_key_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34261,8 +35734,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -34276,19 +35749,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class renew_delegation_token_args: +class remove_master_key_args: """ Attributes: - - token_str_form + - key_seq """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'token_str_form', None, None, ), # 1 + (1, TType.I32, 'key_seq', None, None, ), # 1 ) - def __init__(self, token_str_form=None,): - self.token_str_form = token_str_form + def __init__(self, key_seq=None,): + self.key_seq = key_seq def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -34300,8 +35773,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.token_str_form = iprot.readString() + if ftype == TType.I32: + self.key_seq = iprot.readI32() else: iprot.skip(ftype) else: @@ -34313,10 +35786,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('renew_delegation_token_args') - if self.token_str_form is not None: - oprot.writeFieldBegin('token_str_form', TType.STRING, 1) - oprot.writeString(self.token_str_form) + oprot.writeStructBegin('remove_master_key_args') + if self.key_seq is not None: + oprot.writeFieldBegin('key_seq', TType.I32, 1) + oprot.writeI32(self.key_seq) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34327,7 +35800,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.token_str_form) + value = (value * 31) ^ hash(self.key_seq) return value def __repr__(self): @@ -34341,21 +35814,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class renew_delegation_token_result: +class remove_master_key_result: """ Attributes: - success - - o1 """ thrift_spec = ( - (0, TType.I64, 'success', None, None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (0, TType.BOOL, 'success', None, None, ), # 0 ) - def __init__(self, success=None, o1=None,): + def __init__(self, success=None,): self.success = success - self.o1 = o1 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -34367,14 +35837,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.I64: - self.success = iprot.readI64() - else: - 
iprot.skip(ftype) - elif fid == 1: - if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) + if ftype == TType.BOOL: + self.success = iprot.readBool() else: iprot.skip(ftype) else: @@ -34386,14 +35850,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('renew_delegation_token_result') + oprot.writeStructBegin('remove_master_key_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.I64, 0) - oprot.writeI64(self.success) - oprot.writeFieldEnd() - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34405,7 +35865,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) - value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -34419,20 +35878,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class cancel_delegation_token_args: - """ - Attributes: - - token_str_form - """ +class get_master_keys_args: thrift_spec = ( - None, # 0 - (1, TType.STRING, 'token_str_form', None, None, ), # 1 ) - def __init__(self, token_str_form=None,): - self.token_str_form = token_str_form - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -34442,11 +35892,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRING: - self.token_str_form = iprot.readString() - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34456,11 +35901,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('cancel_delegation_token_args') - if self.token_str_form is not None: - oprot.writeFieldBegin('token_str_form', TType.STRING, 1) - oprot.writeString(self.token_str_form) - oprot.writeFieldEnd() + oprot.writeStructBegin('get_master_keys_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -34470,7 +35911,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.token_str_form) return value def __repr__(self): @@ -34484,19 +35924,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class cancel_delegation_token_result: +class get_master_keys_result: """ Attributes: - - o1 + - success """ thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 ) - def __init__(self, o1=None,): - self.o1 = o1 + def __init__(self, success=None,): + self.success = success def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: 
@@ -34507,10 +35946,14 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1169, _size1166) = iprot.readListBegin() + for _i1170 in xrange(_size1166): + _elem1171 = iprot.readString() + self.success.append(_elem1171) + iprot.readListEnd() else: iprot.skip(ftype) else: @@ -34522,10 +35965,13 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('cancel_delegation_token_result') - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) + oprot.writeStructBegin('get_master_keys_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1172 in self.success: + oprot.writeString(iter1172) + oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34536,7 +35982,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.success) return value def __repr__(self): @@ -34550,23 +35996,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class add_token_args: - """ - Attributes: - - token_identifier - - delegation_token - """ +class get_open_txns_args: thrift_spec = ( - None, # 0 - (1, TType.STRING, 'token_identifier', None, None, ), # 1 - (2, TType.STRING, 'delegation_token', None, None, ), # 2 ) - def __init__(self, token_identifier=None, delegation_token=None,): - self.token_identifier = token_identifier - self.delegation_token = delegation_token - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -34576,16 +36010,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRING: - self.token_identifier = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.delegation_token = iprot.readString() - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34595,15 +36019,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('add_token_args') - if self.token_identifier is not None: - oprot.writeFieldBegin('token_identifier', TType.STRING, 1) - oprot.writeString(self.token_identifier) - oprot.writeFieldEnd() - if self.delegation_token is not None: - oprot.writeFieldBegin('delegation_token', TType.STRING, 2) - oprot.writeString(self.delegation_token) - oprot.writeFieldEnd() + oprot.writeStructBegin('get_open_txns_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -34613,8 +36029,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.token_identifier) - value = (value * 31) ^ 
hash(self.delegation_token) return value def __repr__(self): @@ -34628,14 +36042,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class add_token_result: +class get_open_txns_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.BOOL, 'success', None, None, ), # 0 + (0, TType.STRUCT, 'success', (GetOpenTxnsResponse, GetOpenTxnsResponse.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -34651,8 +36065,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.BOOL: - self.success = iprot.readBool() + if ftype == TType.STRUCT: + self.success = GetOpenTxnsResponse() + self.success.read(iprot) else: iprot.skip(ftype) else: @@ -34664,10 +36079,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('add_token_result') + oprot.writeStructBegin('get_open_txns_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.BOOL, 0) - oprot.writeBool(self.success) + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34692,20 +36107,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class remove_token_args: - """ - Attributes: - - token_identifier - """ +class get_open_txns_info_args: thrift_spec = ( - None, # 0 - (1, TType.STRING, 'token_identifier', None, None, ), # 1 ) - def __init__(self, token_identifier=None,): - self.token_identifier = token_identifier - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -34715,11 +36121,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRING: - self.token_identifier = iprot.readString() - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34729,11 +36130,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('remove_token_args') - if self.token_identifier is not None: - oprot.writeFieldBegin('token_identifier', TType.STRING, 1) - oprot.writeString(self.token_identifier) - oprot.writeFieldEnd() + oprot.writeStructBegin('get_open_txns_info_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -34743,7 +36140,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.token_identifier) return value def __repr__(self): @@ -34757,14 +36153,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class remove_token_result: +class get_open_txns_info_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.BOOL, 'success', None, None, ), # 0 + (0, TType.STRUCT, 'success', (GetOpenTxnsInfoResponse, GetOpenTxnsInfoResponse.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -34780,8 +36176,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == 
TType.BOOL: - self.success = iprot.readBool() + if ftype == TType.STRUCT: + self.success = GetOpenTxnsInfoResponse() + self.success.read(iprot) else: iprot.skip(ftype) else: @@ -34793,10 +36190,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('remove_token_result') + oprot.writeStructBegin('get_open_txns_info_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.BOOL, 0) - oprot.writeBool(self.success) + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34821,19 +36218,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_token_args: +class open_txns_args: """ Attributes: - - token_identifier + - rqst """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'token_identifier', None, None, ), # 1 + (1, TType.STRUCT, 'rqst', (OpenTxnRequest, OpenTxnRequest.thrift_spec), None, ), # 1 ) - def __init__(self, token_identifier=None,): - self.token_identifier = token_identifier + def __init__(self, rqst=None,): + self.rqst = rqst def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -34845,8 +36242,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.token_identifier = iprot.readString() + if ftype == TType.STRUCT: + self.rqst = OpenTxnRequest() + self.rqst.read(iprot) else: iprot.skip(ftype) else: @@ -34858,10 +36256,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_token_args') - if self.token_identifier is not None: - oprot.writeFieldBegin('token_identifier', TType.STRING, 1) - oprot.writeString(self.token_identifier) + oprot.writeStructBegin('open_txns_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34872,7 +36270,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.token_identifier) + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -34886,14 +36284,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_token_result: +class open_txns_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRING, 'success', None, None, ), # 0 + (0, TType.STRUCT, 'success', (OpenTxnsResponse, OpenTxnsResponse.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -34909,8 +36307,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRING: - self.success = iprot.readString() + if ftype == TType.STRUCT: + self.success = OpenTxnsResponse() + self.success.read(iprot) else: iprot.skip(ftype) else: @@ -34922,10 +36321,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, 
(self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_token_result') + oprot.writeStructBegin('open_txns_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRING, 0) - oprot.writeString(self.success) + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34950,11 +36349,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_all_token_identifiers_args: +class abort_txn_args: + """ + Attributes: + - rqst + """ thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (AbortTxnRequest, AbortTxnRequest.thrift_spec), None, ), # 1 ) + def __init__(self, rqst=None,): + self.rqst = rqst + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -34964,6 +36372,12 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = AbortTxnRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34973,7 +36387,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_all_token_identifiers_args') + oprot.writeStructBegin('abort_txn_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34983,6 +36401,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -34996,18 +36415,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_all_token_identifiers_result: +class abort_txn_result: """ Attributes: - - success + - o1 """ thrift_spec = ( - (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None,): - self.success = success + def __init__(self, o1=None,): + self.o1 = o1 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35018,14 +36438,10 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.LIST: - self.success = [] - (_etype1162, _size1159) = iprot.readListBegin() - for _i1163 in xrange(_size1159): - _elem1164 = iprot.readString() - self.success.append(_elem1164) - iprot.readListEnd() + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException() + self.o1.read(iprot) else: iprot.skip(ftype) else: @@ -35037,13 +36453,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - 
oprot.writeStructBegin('get_all_token_identifiers_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.LIST, 0) - oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1165 in self.success: - oprot.writeString(iter1165) - oprot.writeListEnd() + oprot.writeStructBegin('abort_txn_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35054,7 +36467,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -35068,19 +36481,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class add_master_key_args: +class abort_txns_args: """ Attributes: - - key + - rqst """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'key', None, None, ), # 1 + (1, TType.STRUCT, 'rqst', (AbortTxnsRequest, AbortTxnsRequest.thrift_spec), None, ), # 1 ) - def __init__(self, key=None,): - self.key = key + def __init__(self, rqst=None,): + self.rqst = rqst def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35092,8 +36505,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.key = iprot.readString() + if ftype == TType.STRUCT: + self.rqst = AbortTxnsRequest() + self.rqst.read(iprot) else: iprot.skip(ftype) else: @@ -35105,10 +36519,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('add_master_key_args') - if self.key is not None: - oprot.writeFieldBegin('key', TType.STRING, 1) - oprot.writeString(self.key) + oprot.writeStructBegin('abort_txns_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35119,7 +36533,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.key) + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -35133,20 +36547,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class add_master_key_result: +class abort_txns_result: """ Attributes: - - success - o1 """ thrift_spec = ( - (0, TType.I32, 'success', None, None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None, o1=None,): - self.success = success + def __init__(self, o1=None,): self.o1 = o1 def read(self, iprot): @@ -35158,14 +36570,9 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.I32: - self.success = iprot.readI32() - else: - iprot.skip(ftype) - elif fid == 1: + if fid == 1: if ftype == TType.STRUCT: - self.o1 = MetaException() + self.o1 = NoSuchTxnException() self.o1.read(iprot) else: iprot.skip(ftype) @@ -35178,11 +36585,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is 
not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('add_master_key_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.I32, 0) - oprot.writeI32(self.success) - oprot.writeFieldEnd() + oprot.writeStructBegin('abort_txns_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -35196,7 +36599,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) return value @@ -35211,22 +36613,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class update_master_key_args: +class commit_txn_args: """ Attributes: - - seq_number - - key + - rqst """ thrift_spec = ( None, # 0 - (1, TType.I32, 'seq_number', None, None, ), # 1 - (2, TType.STRING, 'key', None, None, ), # 2 + (1, TType.STRUCT, 'rqst', (CommitTxnRequest, CommitTxnRequest.thrift_spec), None, ), # 1 ) - def __init__(self, seq_number=None, key=None,): - self.seq_number = seq_number - self.key = key + def __init__(self, rqst=None,): + self.rqst = rqst def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35238,13 +36637,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.I32: - self.seq_number = iprot.readI32() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.key = iprot.readString() + if ftype == TType.STRUCT: + self.rqst = CommitTxnRequest() + self.rqst.read(iprot) else: iprot.skip(ftype) else: @@ -35256,14 +36651,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('update_master_key_args') - if self.seq_number is not None: - oprot.writeFieldBegin('seq_number', TType.I32, 1) - oprot.writeI32(self.seq_number) - oprot.writeFieldEnd() - if self.key is not None: - oprot.writeFieldBegin('key', TType.STRING, 2) - oprot.writeString(self.key) + oprot.writeStructBegin('commit_txn_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35274,8 +36665,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.seq_number) - value = (value * 31) ^ hash(self.key) + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -35289,7 +36679,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class update_master_key_result: +class commit_txn_result: """ Attributes: - o1 @@ -35298,8 +36688,8 @@ class update_master_key_result: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 ) def __init__(self, o1=None, o2=None,): @@ -35317,13 +36707,13 @@ def read(self, iprot): break if fid == 1: if ftype == 
TType.STRUCT: - self.o1 = NoSuchObjectException() + self.o1 = NoSuchTxnException() self.o1.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = MetaException() + self.o2 = TxnAbortedException() self.o2.read(iprot) else: iprot.skip(ftype) @@ -35336,7 +36726,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('update_master_key_result') + oprot.writeStructBegin('commit_txn_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -35369,19 +36759,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class remove_master_key_args: +class lock_args: """ Attributes: - - key_seq + - rqst """ thrift_spec = ( None, # 0 - (1, TType.I32, 'key_seq', None, None, ), # 1 + (1, TType.STRUCT, 'rqst', (LockRequest, LockRequest.thrift_spec), None, ), # 1 ) - def __init__(self, key_seq=None,): - self.key_seq = key_seq + def __init__(self, rqst=None,): + self.rqst = rqst def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35393,8 +36783,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.I32: - self.key_seq = iprot.readI32() + if ftype == TType.STRUCT: + self.rqst = LockRequest() + self.rqst.read(iprot) else: iprot.skip(ftype) else: @@ -35406,10 +36797,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('remove_master_key_args') - if self.key_seq is not None: - oprot.writeFieldBegin('key_seq', TType.I32, 1) - oprot.writeI32(self.key_seq) + oprot.writeStructBegin('lock_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35420,7 +36811,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.key_seq) + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -35434,18 +36825,24 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class remove_master_key_result: +class lock_result: """ Attributes: - success + - o1 + - o2 """ thrift_spec = ( - (0, TType.BOOL, 'success', None, None, ), # 0 + (0, TType.STRUCT, 'success', (LockResponse, LockResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None,): + def __init__(self, success=None, o1=None, o2=None,): self.success = success + self.o1 = o1 + self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35457,8 +36854,21 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.BOOL: - self.success = iprot.readBool() + if ftype == 
TType.STRUCT: + self.success = LockResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnAbortedException() + self.o2.read(iprot) else: iprot.skip(ftype) else: @@ -35470,10 +36880,18 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('remove_master_key_result') + oprot.writeStructBegin('lock_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.BOOL, 0) - oprot.writeBool(self.success) + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35485,6 +36903,8 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -35498,11 +36918,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_master_keys_args: +class check_lock_args: + """ + Attributes: + - rqst + """ thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (CheckLockRequest, CheckLockRequest.thrift_spec), None, ), # 1 ) + def __init__(self, rqst=None,): + self.rqst = rqst + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -35512,6 +36941,12 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = CheckLockRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -35521,7 +36956,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_master_keys_args') + oprot.writeStructBegin('check_lock_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35531,6 +36970,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -35544,18 +36984,27 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_master_keys_result: +class check_lock_result: """ Attributes: - success + - o1 + - o2 + - o3 """ thrift_spec = ( - (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (0, TType.STRUCT, 'success', (LockResponse, LockResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + 
(2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (NoSuchLockException, NoSuchLockException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None,): + def __init__(self, success=None, o1=None, o2=None, o3=None,): self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35567,13 +37016,27 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.LIST: - self.success = [] - (_etype1169, _size1166) = iprot.readListBegin() - for _i1170 in xrange(_size1166): - _elem1171 = iprot.readString() - self.success.append(_elem1171) - iprot.readListEnd() + if ftype == TType.STRUCT: + self.success = LockResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnAbortedException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = NoSuchLockException() + self.o3.read(iprot) else: iprot.skip(ftype) else: @@ -35585,13 +37048,22 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_master_keys_result') + oprot.writeStructBegin('check_lock_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.LIST, 0) - oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1172 in self.success: - oprot.writeString(iter1172) - oprot.writeListEnd() + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35603,6 +37075,9 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -35616,11 +37091,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_open_txns_args: +class unlock_args: + """ + Attributes: + - rqst + """ thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (UnlockRequest, UnlockRequest.thrift_spec), None, ), # 1 ) + def __init__(self, rqst=None,): + self.rqst = rqst + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -35630,6 +37114,12 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRUCT: + 
self.rqst = UnlockRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -35639,7 +37129,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_open_txns_args') + oprot.writeStructBegin('unlock_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35649,6 +37143,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -35662,18 +37157,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_open_txns_result: +class unlock_result: """ Attributes: - - success + - o1 + - o2 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GetOpenTxnsResponse, GetOpenTxnsResponse.thrift_spec), None, ), # 0 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchLockException, NoSuchLockException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (TxnOpenException, TxnOpenException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None,): - self.success = success + def __init__(self, o1=None, o2=None,): + self.o1 = o1 + self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35684,10 +37183,16 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: + if fid == 1: if ftype == TType.STRUCT: - self.success = GetOpenTxnsResponse() - self.success.read(iprot) + self.o1 = NoSuchLockException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnOpenException() + self.o2.read(iprot) else: iprot.skip(ftype) else: @@ -35699,10 +37204,14 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_open_txns_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeStructBegin('unlock_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35713,7 +37222,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -35727,11 +37237,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_open_txns_info_args: +class show_locks_args: + """ + Attributes: + - rqst + """ thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (ShowLocksRequest, ShowLocksRequest.thrift_spec), None, ), # 1 ) + def __init__(self, rqst=None,): + self.rqst = rqst + def read(self, iprot): if iprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -35741,6 +37260,12 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = ShowLocksRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -35750,7 +37275,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_open_txns_info_args') + oprot.writeStructBegin('show_locks_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35760,6 +37289,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -35773,14 +37303,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_open_txns_info_result: +class show_locks_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GetOpenTxnsInfoResponse, GetOpenTxnsInfoResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (ShowLocksResponse, ShowLocksResponse.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -35797,7 +37327,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = GetOpenTxnsInfoResponse() + self.success = ShowLocksResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -35810,7 +37340,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_open_txns_info_result') + oprot.writeStructBegin('show_locks_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -35838,19 +37368,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class open_txns_args: +class heartbeat_args: """ Attributes: - - rqst + - ids """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (OpenTxnRequest, OpenTxnRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'ids', (HeartbeatRequest, HeartbeatRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst + def __init__(self, ids=None,): + self.ids = ids def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35863,8 +37393,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = OpenTxnRequest() - self.rqst.read(iprot) + self.ids = HeartbeatRequest() + self.ids.read(iprot) else: iprot.skip(ftype) else: @@ -35876,10 +37406,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, 
self.thrift_spec))) return - oprot.writeStructBegin('open_txns_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) + oprot.writeStructBegin('heartbeat_args') + if self.ids is not None: + oprot.writeFieldBegin('ids', TType.STRUCT, 1) + self.ids.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35890,7 +37420,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.ids) return value def __repr__(self): @@ -35904,18 +37434,25 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class open_txns_result: +class heartbeat_result: """ Attributes: - - success + - o1 + - o2 + - o3 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (OpenTxnsResponse, OpenTxnsResponse.thrift_spec), None, ), # 0 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchLockException, NoSuchLockException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None,): - self.success = success + def __init__(self, o1=None, o2=None, o3=None,): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35926,10 +37463,22 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: + if fid == 1: if ftype == TType.STRUCT: - self.success = OpenTxnsResponse() - self.success.read(iprot) + self.o1 = NoSuchLockException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchTxnException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = TxnAbortedException() + self.o3.read(iprot) else: iprot.skip(ftype) else: @@ -35941,10 +37490,18 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('open_txns_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeStructBegin('heartbeat_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35955,7 +37512,9 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -35969,19 +37528,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class abort_txn_args: +class heartbeat_txn_range_args: """ Attributes: - - rqst + - txns """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (AbortTxnRequest, 
AbortTxnRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'txns', (HeartbeatTxnRangeRequest, HeartbeatTxnRangeRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst + def __init__(self, txns=None,): + self.txns = txns def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35994,8 +37553,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = AbortTxnRequest() - self.rqst.read(iprot) + self.txns = HeartbeatTxnRangeRequest() + self.txns.read(iprot) else: iprot.skip(ftype) else: @@ -36007,10 +37566,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('abort_txn_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) + oprot.writeStructBegin('heartbeat_txn_range_args') + if self.txns is not None: + oprot.writeFieldBegin('txns', TType.STRUCT, 1) + self.txns.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -36021,7 +37580,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.txns) return value def __repr__(self): @@ -36035,19 +37594,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class abort_txn_result: +class heartbeat_txn_range_result: """ Attributes: - - o1 + - success """ thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + (0, TType.STRUCT, 'success', (HeartbeatTxnRangeResponse, HeartbeatTxnRangeResponse.thrift_spec), None, ), # 0 ) - def __init__(self, o1=None,): - self.o1 = o1 + def __init__(self, success=None,): + self.success = success def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -36058,10 +37616,10 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: + if fid == 0: if ftype == TType.STRUCT: - self.o1 = NoSuchTxnException() - self.o1.read(iprot) + self.success = HeartbeatTxnRangeResponse() + self.success.read(iprot) else: iprot.skip(ftype) else: @@ -36073,10 +37631,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('abort_txn_result') - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) + oprot.writeStructBegin('heartbeat_txn_range_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -36087,7 +37645,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.success) return value def __repr__(self): @@ -36101,7 +37659,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self 
== other) -class abort_txns_args: +class compact_args: """ Attributes: - rqst @@ -36109,7 +37667,7 @@ class abort_txns_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (AbortTxnsRequest, AbortTxnsRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (CompactionRequest, CompactionRequest.thrift_spec), None, ), # 1 ) def __init__(self, rqst=None,): @@ -36126,7 +37684,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = AbortTxnsRequest() + self.rqst = CompactionRequest() self.rqst.read(iprot) else: iprot.skip(ftype) @@ -36139,7 +37697,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('abort_txns_args') + oprot.writeStructBegin('compact_args') if self.rqst is not None: oprot.writeFieldBegin('rqst', TType.STRUCT, 1) self.rqst.write(oprot) @@ -36167,20 +37725,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class abort_txns_result: - """ - Attributes: - - o1 - """ +class compact_result: thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 ) - def __init__(self, o1=None,): - self.o1 = o1 - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -36190,12 +37739,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.o1 = NoSuchTxnException() - self.o1.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -36205,11 +37748,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('abort_txns_result') - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('compact_result') oprot.writeFieldStop() oprot.writeStructEnd() @@ -36219,7 +37758,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -36233,7 +37771,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class commit_txn_args: +class compact2_args: """ Attributes: - rqst @@ -36241,7 +37779,7 @@ class commit_txn_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (CommitTxnRequest, CommitTxnRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (CompactionRequest, CompactionRequest.thrift_spec), None, ), # 1 ) def __init__(self, rqst=None,): @@ -36258,7 +37796,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = CommitTxnRequest() + self.rqst = CompactionRequest() self.rqst.read(iprot) else: iprot.skip(ftype) @@ -36271,7 +37809,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, 
self.thrift_spec))) return - oprot.writeStructBegin('commit_txn_args') + oprot.writeStructBegin('compact2_args') if self.rqst is not None: oprot.writeFieldBegin('rqst', TType.STRUCT, 1) self.rqst.write(oprot) @@ -36299,22 +37837,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class commit_txn_result: +class compact2_result: """ Attributes: - - o1 - - o2 + - success """ thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 + (0, TType.STRUCT, 'success', (CompactionResponse, CompactionResponse.thrift_spec), None, ), # 0 ) - def __init__(self, o1=None, o2=None,): - self.o1 = o1 - self.o2 = o2 + def __init__(self, success=None,): + self.success = success def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -36325,16 +37859,10 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.o1 = NoSuchTxnException() - self.o1.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: + if fid == 0: if ftype == TType.STRUCT: - self.o2 = TxnAbortedException() - self.o2.read(iprot) + self.success = CompactionResponse() + self.success.read(iprot) else: iprot.skip(ftype) else: @@ -36346,14 +37874,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('commit_txn_result') - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 2) - self.o2.write(oprot) + oprot.writeStructBegin('compact2_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -36364,8 +37888,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.o1) - value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.success) return value def __repr__(self): @@ -36379,7 +37902,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class lock_args: +class show_compact_args: """ Attributes: - rqst @@ -36387,7 +37910,7 @@ class lock_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (LockRequest, LockRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (ShowCompactRequest, ShowCompactRequest.thrift_spec), None, ), # 1 ) def __init__(self, rqst=None,): @@ -36404,7 +37927,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = LockRequest() + self.rqst = ShowCompactRequest() self.rqst.read(iprot) else: iprot.skip(ftype) @@ -36417,7 +37940,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('lock_args') + oprot.writeStructBegin('show_compact_args') if self.rqst is not None: 
oprot.writeFieldBegin('rqst', TType.STRUCT, 1) self.rqst.write(oprot) @@ -36445,24 +37968,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class lock_result: +class show_compact_result: """ Attributes: - success - - o1 - - o2 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (LockResponse, LockResponse.thrift_spec), None, ), # 0 - (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 + (0, TType.STRUCT, 'success', (ShowCompactResponse, ShowCompactResponse.thrift_spec), None, ), # 0 ) - def __init__(self, success=None, o1=None, o2=None,): + def __init__(self, success=None,): self.success = success - self.o1 = o1 - self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -36475,22 +37992,10 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = LockResponse() + self.success = ShowCompactResponse() self.success.read(iprot) else: iprot.skip(ftype) - elif fid == 1: - if ftype == TType.STRUCT: - self.o1 = NoSuchTxnException() - self.o1.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRUCT: - self.o2 = TxnAbortedException() - self.o2.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -36500,19 +38005,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('lock_result') + oprot.writeStructBegin('show_compact_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 2) - self.o2.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -36523,8 +38020,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) - value = (value * 31) ^ hash(self.o1) - value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -36538,7 +38033,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class check_lock_args: +class add_dynamic_partitions_args: """ Attributes: - rqst @@ -36546,7 +38041,7 @@ class check_lock_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (CheckLockRequest, CheckLockRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (AddDynamicPartitions, AddDynamicPartitions.thrift_spec), None, ), # 1 ) def __init__(self, rqst=None,): @@ -36563,7 +38058,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = CheckLockRequest() + self.rqst = AddDynamicPartitions() self.rqst.read(iprot) else: iprot.skip(ftype) @@ -36576,7 +38071,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('check_lock_args') + 
oprot.writeStructBegin('add_dynamic_partitions_args') if self.rqst is not None: oprot.writeFieldBegin('rqst', TType.STRUCT, 1) self.rqst.write(oprot) @@ -36604,27 +38099,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class check_lock_result: +class add_dynamic_partitions_result: """ Attributes: - - success - o1 - o2 - - o3 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (LockResponse, LockResponse.thrift_spec), None, ), # 0 + None, # 0 (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 - (3, TType.STRUCT, 'o3', (NoSuchLockException, NoSuchLockException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None, o1=None, o2=None, o3=None,): - self.success = success + def __init__(self, o1=None, o2=None,): self.o1 = o1 self.o2 = o2 - self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -36635,13 +38125,7 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.STRUCT: - self.success = LockResponse() - self.success.read(iprot) - else: - iprot.skip(ftype) - elif fid == 1: + if fid == 1: if ftype == TType.STRUCT: self.o1 = NoSuchTxnException() self.o1.read(iprot) @@ -36653,12 +38137,6 @@ def read(self, iprot): self.o2.read(iprot) else: iprot.skip(ftype) - elif fid == 3: - if ftype == TType.STRUCT: - self.o3 = NoSuchLockException() - self.o3.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -36668,11 +38146,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('check_lock_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('add_dynamic_partitions_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -36681,10 +38155,6 @@ def write(self, oprot): oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() - if self.o3 is not None: - oprot.writeFieldBegin('o3', TType.STRUCT, 3) - self.o3.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -36694,10 +38164,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) value = (value * 31) ^ hash(self.o2) - value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -36711,7 +38179,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class unlock_args: +class get_next_notification_args: """ Attributes: - rqst @@ -36719,7 +38187,7 @@ class unlock_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (UnlockRequest, UnlockRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (NotificationEventRequest, NotificationEventRequest.thrift_spec), None, ), # 1 ) def __init__(self, rqst=None,): @@ -36736,7 +38204,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = UnlockRequest() + self.rqst = 
NotificationEventRequest() self.rqst.read(iprot) else: iprot.skip(ftype) @@ -36749,7 +38217,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('unlock_args') + oprot.writeStructBegin('get_next_notification_args') if self.rqst is not None: oprot.writeFieldBegin('rqst', TType.STRUCT, 1) self.rqst.write(oprot) @@ -36777,22 +38245,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class unlock_result: +class get_next_notification_result: """ Attributes: - - o1 - - o2 + - success """ thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'o1', (NoSuchLockException, NoSuchLockException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (TxnOpenException, TxnOpenException.thrift_spec), None, ), # 2 + (0, TType.STRUCT, 'success', (NotificationEventResponse, NotificationEventResponse.thrift_spec), None, ), # 0 ) - def __init__(self, o1=None, o2=None,): - self.o1 = o1 - self.o2 = o2 + def __init__(self, success=None,): + self.success = success def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -36803,16 +38267,10 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.o1 = NoSuchLockException() - self.o1.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: + if fid == 0: if ftype == TType.STRUCT: - self.o2 = TxnOpenException() - self.o2.read(iprot) + self.success = NotificationEventResponse() + self.success.read(iprot) else: iprot.skip(ftype) else: @@ -36824,14 +38282,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('unlock_result') - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 2) - self.o2.write(oprot) + oprot.writeStructBegin('get_next_notification_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -36842,8 +38296,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.o1) - value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.success) return value def __repr__(self): @@ -36857,20 +38310,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class show_locks_args: - """ - Attributes: - - rqst - """ +class get_current_notificationEventId_args: thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'rqst', (ShowLocksRequest, ShowLocksRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -36880,12 
+38324,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.rqst = ShowLocksRequest() - self.rqst.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -36895,11 +38333,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('show_locks_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('get_current_notificationEventId_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -36909,7 +38343,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -36923,14 +38356,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class show_locks_result: +class get_current_notificationEventId_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (ShowLocksResponse, ShowLocksResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (CurrentNotificationEventId, CurrentNotificationEventId.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -36947,7 +38380,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = ShowLocksResponse() + self.success = CurrentNotificationEventId() self.success.read(iprot) else: iprot.skip(ftype) @@ -36960,7 +38393,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('show_locks_result') + oprot.writeStructBegin('get_current_notificationEventId_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -36988,19 +38421,15 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class heartbeat_args: +class get_notification_events_count_args: """ Attributes: - - ids + - rqst """ - thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'ids', (HeartbeatRequest, HeartbeatRequest.thrift_spec), None, ), # 1 - ) - - def __init__(self, ids=None,): - self.ids = ids + thrift_spec = None + def __init__(self, rqst=None,): + self.rqst = rqst def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37011,10 +38440,10 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: + if fid == -1: if ftype == TType.STRUCT: - self.ids = HeartbeatRequest() - self.ids.read(iprot) + self.rqst = NotificationEventsCountRequest() + self.rqst.read(iprot) else: iprot.skip(ftype) else: @@ -37026,10 +38455,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('heartbeat_args') - if self.ids is not None: - oprot.writeFieldBegin('ids', TType.STRUCT, 1) - 
self.ids.write(oprot) + oprot.writeStructBegin('get_notification_events_count_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, -1) + self.rqst.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37040,7 +38469,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.ids) + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -37054,25 +38483,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class heartbeat_result: +class get_notification_events_count_result: """ Attributes: - - o1 - - o2 - - o3 + - success """ thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'o1', (NoSuchLockException, NoSuchLockException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 2 - (3, TType.STRUCT, 'o3', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 3 + (0, TType.STRUCT, 'success', (NotificationEventsCountResponse, NotificationEventsCountResponse.thrift_spec), None, ), # 0 ) - def __init__(self, o1=None, o2=None, o3=None,): - self.o1 = o1 - self.o2 = o2 - self.o3 = o3 + def __init__(self, success=None,): + self.success = success def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37083,22 +38505,10 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.o1 = NoSuchLockException() - self.o1.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRUCT: - self.o2 = NoSuchTxnException() - self.o2.read(iprot) - else: - iprot.skip(ftype) - elif fid == 3: + if fid == 0: if ftype == TType.STRUCT: - self.o3 = TxnAbortedException() - self.o3.read(iprot) + self.success = NotificationEventsCountResponse() + self.success.read(iprot) else: iprot.skip(ftype) else: @@ -37110,18 +38520,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('heartbeat_result') - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 2) - self.o2.write(oprot) - oprot.writeFieldEnd() - if self.o3 is not None: - oprot.writeFieldBegin('o3', TType.STRUCT, 3) - self.o3.write(oprot) + oprot.writeStructBegin('get_notification_events_count_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37132,9 +38534,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.o1) - value = (value * 31) ^ hash(self.o2) - value = (value * 31) ^ hash(self.o3) + value = (value * 31) ^ hash(self.success) return value def __repr__(self): @@ -37148,19 +38548,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class heartbeat_txn_range_args: +class fire_listener_event_args: """ Attributes: - - txns + - rqst """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'txns', (HeartbeatTxnRangeRequest, 
HeartbeatTxnRangeRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (FireEventRequest, FireEventRequest.thrift_spec), None, ), # 1 ) - def __init__(self, txns=None,): - self.txns = txns + def __init__(self, rqst=None,): + self.rqst = rqst def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37173,8 +38573,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.txns = HeartbeatTxnRangeRequest() - self.txns.read(iprot) + self.rqst = FireEventRequest() + self.rqst.read(iprot) else: iprot.skip(ftype) else: @@ -37186,10 +38586,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('heartbeat_txn_range_args') - if self.txns is not None: - oprot.writeFieldBegin('txns', TType.STRUCT, 1) - self.txns.write(oprot) + oprot.writeStructBegin('fire_listener_event_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37200,7 +38600,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.txns) + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -37214,14 +38614,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class heartbeat_txn_range_result: +class fire_listener_event_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (HeartbeatTxnRangeResponse, HeartbeatTxnRangeResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (FireEventResponse, FireEventResponse.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -37238,7 +38638,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = HeartbeatTxnRangeResponse() + self.success = FireEventResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -37251,7 +38651,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('heartbeat_txn_range_result') + oprot.writeStructBegin('fire_listener_event_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -37279,20 +38679,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class compact_args: - """ - Attributes: - - rqst - """ +class flushCache_args: thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'rqst', (CompactionRequest, CompactionRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -37302,12 +38693,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.rqst = 
CompactionRequest() - self.rqst.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -37317,11 +38702,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('compact_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('flushCache_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -37331,7 +38712,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -37345,7 +38725,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class compact_result: +class flushCache_result: thrift_spec = ( ) @@ -37368,7 +38748,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('compact_result') + oprot.writeStructBegin('flushCache_result') oprot.writeFieldStop() oprot.writeStructEnd() @@ -37391,19 +38771,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class compact2_args: +class cm_recycle_args: """ Attributes: - - rqst + - request """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (CompactionRequest, CompactionRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (CmRecycleRequest, CmRecycleRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst + def __init__(self, request=None,): + self.request = request def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37416,8 +38796,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = CompactionRequest() - self.rqst.read(iprot) + self.request = CmRecycleRequest() + self.request.read(iprot) else: iprot.skip(ftype) else: @@ -37429,10 +38809,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('compact2_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) + oprot.writeStructBegin('cm_recycle_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37443,7 +38823,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.request) return value def __repr__(self): @@ -37457,18 +38837,21 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class compact2_result: +class cm_recycle_result: """ Attributes: - success + - o1 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (CompactionResponse, CompactionResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (CmRecycleResponse, CmRecycleResponse.thrift_spec), None, 
), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None,): + def __init__(self, success=None, o1=None,): self.success = success + self.o1 = o1 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37481,10 +38864,16 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = CompactionResponse() + self.success = CmRecycleResponse() self.success.read(iprot) else: iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -37494,11 +38883,15 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('compact2_result') + oprot.writeStructBegin('cm_recycle_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37509,6 +38902,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -37522,19 +38916,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class show_compact_args: +class get_file_metadata_by_expr_args: """ Attributes: - - rqst + - req """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (ShowCompactRequest, ShowCompactRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'req', (GetFileMetadataByExprRequest, GetFileMetadataByExprRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst + def __init__(self, req=None,): + self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37547,8 +38941,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = ShowCompactRequest() - self.rqst.read(iprot) + self.req = GetFileMetadataByExprRequest() + self.req.read(iprot) else: iprot.skip(ftype) else: @@ -37560,10 +38954,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('show_compact_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) + oprot.writeStructBegin('get_file_metadata_by_expr_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37574,7 +38968,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -37588,14 +38982,14 @@ def __eq__(self, other): def __ne__(self, 
other): return not (self == other) -class show_compact_result: +class get_file_metadata_by_expr_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (ShowCompactResponse, ShowCompactResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (GetFileMetadataByExprResult, GetFileMetadataByExprResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -37612,7 +39006,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = ShowCompactResponse() + self.success = GetFileMetadataByExprResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -37625,7 +39019,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('show_compact_result') + oprot.writeStructBegin('get_file_metadata_by_expr_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -37653,19 +39047,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class add_dynamic_partitions_args: +class get_file_metadata_args: """ Attributes: - - rqst + - req """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (AddDynamicPartitions, AddDynamicPartitions.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'req', (GetFileMetadataRequest, GetFileMetadataRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst + def __init__(self, req=None,): + self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37678,8 +39072,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = AddDynamicPartitions() - self.rqst.read(iprot) + self.req = GetFileMetadataRequest() + self.req.read(iprot) else: iprot.skip(ftype) else: @@ -37691,10 +39085,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('add_dynamic_partitions_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) + oprot.writeStructBegin('get_file_metadata_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37705,7 +39099,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -37719,22 +39113,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class add_dynamic_partitions_result: +class get_file_metadata_result: """ Attributes: - - o1 - - o2 + - success """ thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 + (0, TType.STRUCT, 'success', (GetFileMetadataResult, GetFileMetadataResult.thrift_spec), None, ), # 0 ) - def __init__(self, o1=None, o2=None,): - self.o1 = o1 - self.o2 = o2 + def 
__init__(self, success=None,): + self.success = success def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37745,16 +39135,10 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.o1 = NoSuchTxnException() - self.o1.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: + if fid == 0: if ftype == TType.STRUCT: - self.o2 = TxnAbortedException() - self.o2.read(iprot) + self.success = GetFileMetadataResult() + self.success.read(iprot) else: iprot.skip(ftype) else: @@ -37766,14 +39150,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('add_dynamic_partitions_result') - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 2) - self.o2.write(oprot) + oprot.writeStructBegin('get_file_metadata_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37784,8 +39164,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.o1) - value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.success) return value def __repr__(self): @@ -37799,19 +39178,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_next_notification_args: +class put_file_metadata_args: """ Attributes: - - rqst + - req """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (NotificationEventRequest, NotificationEventRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'req', (PutFileMetadataRequest, PutFileMetadataRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst + def __init__(self, req=None,): + self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37824,8 +39203,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = NotificationEventRequest() - self.rqst.read(iprot) + self.req = PutFileMetadataRequest() + self.req.read(iprot) else: iprot.skip(ftype) else: @@ -37837,10 +39216,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_next_notification_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) + oprot.writeStructBegin('put_file_metadata_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37851,7 +39230,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.req) return value def 
__repr__(self): @@ -37865,14 +39244,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_next_notification_result: +class put_file_metadata_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (NotificationEventResponse, NotificationEventResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (PutFileMetadataResult, PutFileMetadataResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -37889,7 +39268,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = NotificationEventResponse() + self.success = PutFileMetadataResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -37902,7 +39281,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_next_notification_result') + oprot.writeStructBegin('put_file_metadata_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -37930,126 +39309,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_current_notificationEventId_args: - - thrift_spec = ( - ) - - def read(self, iprot): - if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: - fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) - return - iprot.readStructBegin() - while True: - (fname, ftype, fid) = iprot.readFieldBegin() - if ftype == TType.STOP: - break - else: - iprot.skip(ftype) - iprot.readFieldEnd() - iprot.readStructEnd() - - def write(self, oprot): - if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: - oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) - return - oprot.writeStructBegin('get_current_notificationEventId_args') - oprot.writeFieldStop() - oprot.writeStructEnd() - - def validate(self): - return - - - def __hash__(self): - value = 17 - return value - - def __repr__(self): - L = ['%s=%r' % (key, value) - for key, value in self.__dict__.iteritems()] - return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) - - def __eq__(self, other): - return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ - - def __ne__(self, other): - return not (self == other) - -class get_current_notificationEventId_result: +class clear_file_metadata_args: """ Attributes: - - success + - req """ thrift_spec = ( - (0, TType.STRUCT, 'success', (CurrentNotificationEventId, CurrentNotificationEventId.thrift_spec), None, ), # 0 + None, # 0 + (1, TType.STRUCT, 'req', (ClearFileMetadataRequest, ClearFileMetadataRequest.thrift_spec), None, ), # 1 ) - def __init__(self, success=None,): - self.success = success - - def read(self, iprot): - if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: - fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) - return - iprot.readStructBegin() - while True: - (fname, ftype, fid) = iprot.readFieldBegin() - if ftype == TType.STOP: - break - if fid == 0: - if ftype == 
TType.STRUCT: - self.success = CurrentNotificationEventId() - self.success.read(iprot) - else: - iprot.skip(ftype) - else: - iprot.skip(ftype) - iprot.readFieldEnd() - iprot.readStructEnd() - - def write(self, oprot): - if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: - oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) - return - oprot.writeStructBegin('get_current_notificationEventId_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) - oprot.writeFieldEnd() - oprot.writeFieldStop() - oprot.writeStructEnd() - - def validate(self): - return - - - def __hash__(self): - value = 17 - value = (value * 31) ^ hash(self.success) - return value - - def __repr__(self): - L = ['%s=%r' % (key, value) - for key, value in self.__dict__.iteritems()] - return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) - - def __eq__(self, other): - return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ - - def __ne__(self, other): - return not (self == other) - -class get_notification_events_count_args: - """ - Attributes: - - rqst - """ - - thrift_spec = None - def __init__(self, rqst=None,): - self.rqst = rqst + def __init__(self, req=None,): + self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38060,10 +39332,10 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == -1: + if fid == 1: if ftype == TType.STRUCT: - self.rqst = NotificationEventsCountRequest() - self.rqst.read(iprot) + self.req = ClearFileMetadataRequest() + self.req.read(iprot) else: iprot.skip(ftype) else: @@ -38075,10 +39347,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_notification_events_count_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, -1) - self.rqst.write(oprot) + oprot.writeStructBegin('clear_file_metadata_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38089,7 +39361,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -38103,14 +39375,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_notification_events_count_result: +class clear_file_metadata_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (NotificationEventsCountResponse, NotificationEventsCountResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (ClearFileMetadataResult, ClearFileMetadataResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -38127,7 +39399,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = NotificationEventsCountResponse() + self.success = ClearFileMetadataResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -38140,7 +39412,7 @@ def write(self, oprot): if 
oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_notification_events_count_result') + oprot.writeStructBegin('clear_file_metadata_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -38168,19 +39440,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class fire_listener_event_args: +class cache_file_metadata_args: """ Attributes: - - rqst + - req """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (FireEventRequest, FireEventRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'req', (CacheFileMetadataRequest, CacheFileMetadataRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst + def __init__(self, req=None,): + self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38193,8 +39465,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = FireEventRequest() - self.rqst.read(iprot) + self.req = CacheFileMetadataRequest() + self.req.read(iprot) else: iprot.skip(ftype) else: @@ -38206,10 +39478,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('fire_listener_event_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) + oprot.writeStructBegin('cache_file_metadata_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38220,7 +39492,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -38234,14 +39506,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class fire_listener_event_result: +class cache_file_metadata_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (FireEventResponse, FireEventResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (CacheFileMetadataResult, CacheFileMetadataResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -38258,7 +39530,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = FireEventResponse() + self.success = CacheFileMetadataResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -38271,7 +39543,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('fire_listener_event_result') + oprot.writeStructBegin('cache_file_metadata_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -38299,7 +39571,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class flushCache_args: +class 
get_metastore_db_uuid_args: thrift_spec = ( ) @@ -38322,7 +39594,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('flushCache_args') + oprot.writeStructBegin('get_metastore_db_uuid_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -38345,11 +39617,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class flushCache_result: +class get_metastore_db_uuid_result: + """ + Attributes: + - success + - o1 + """ thrift_spec = ( + (0, TType.STRING, 'success', None, None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -38359,6 +39642,17 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 0: + if ftype == TType.STRING: + self.success = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -38368,7 +39662,15 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('flushCache_result') + oprot.writeStructBegin('get_metastore_db_uuid_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRING, 0) + oprot.writeString(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38378,6 +39680,8 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -38391,7 +39695,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class cm_recycle_args: +class create_resource_plan_args: """ Attributes: - request @@ -38399,7 +39703,7 @@ class cm_recycle_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (CmRecycleRequest, CmRecycleRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (WMCreateResourcePlanRequest, WMCreateResourcePlanRequest.thrift_spec), None, ), # 1 ) def __init__(self, request=None,): @@ -38416,7 +39720,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.request = CmRecycleRequest() + self.request = WMCreateResourcePlanRequest() self.request.read(iprot) else: iprot.skip(ftype) @@ -38429,7 +39733,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('cm_recycle_args') + 
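Note: get_metastore_db_uuid_result above carries a plain STRING success (field 0) and a MetaException (field 1). A minimal client-side sketch of the call; the hive_metastore package path, host and port are illustrative assumptions, not taken from this patch.

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore import ThriftHiveMetastore
    from hive_metastore.ttypes import MetaException

    transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
    client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    try:
        print(client.get_metastore_db_uuid())   # the string carried in 'success'
    except MetaException as e:                   # raised when the server fills 'o1'
        print('no UUID available: %s' % e)
    finally:
        transport.close()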
oprot.writeStructBegin('create_resource_plan_args') if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 1) self.request.write(oprot) @@ -38457,21 +39761,27 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class cm_recycle_result: +class create_resource_plan_result: """ Attributes: - success - o1 + - o2 + - o3 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (CmRecycleResponse, CmRecycleResponse.thrift_spec), None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (0, TType.STRUCT, 'success', (WMCreateResourcePlanResponse, WMCreateResourcePlanResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (AlreadyExistsException, AlreadyExistsException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None, o1=None,): + def __init__(self, success=None, o1=None, o2=None, o3=None,): self.success = success self.o1 = o1 + self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38484,16 +39794,28 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = CmRecycleResponse() + self.success = WMCreateResourcePlanResponse() self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.o1 = MetaException() + self.o1 = AlreadyExistsException() self.o1.read(iprot) else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -38503,7 +39825,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('cm_recycle_result') + oprot.writeStructBegin('create_resource_plan_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -38512,6 +39834,14 @@ def write(self, oprot): oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38523,6 +39853,8 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -38536,19 +39868,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_file_metadata_by_expr_args: +class get_resource_plan_args: """ Attributes: - - req + - request """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'req', (GetFileMetadataByExprRequest, GetFileMetadataByExprRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 
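Note: create_resource_plan_result carries three exception slots (o1 AlreadyExistsException, o2 InvalidObjectException, o3 MetaException) alongside success. On the client side the generated recv_* helper turns such a result into a return value or a raised exception; the function below is a hand-written illustration of that dispatch, not the code the Thrift compiler emits.

    from thrift.Thrift import TApplicationException

    def unpack_create_resource_plan(result):
        # 'result' is a create_resource_plan_result freshly read off the wire.
        if result.success is not None:
            return result.success            # WMCreateResourcePlanResponse
        for exc in (result.o1, result.o2, result.o3):
            if exc is not None:
                raise exc                    # AlreadyExists / InvalidObject / MetaException
        raise TApplicationException(TApplicationException.MISSING_RESULT,
                                    'create_resource_plan failed: unknown result')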
'request', (WMGetResourcePlanRequest, WMGetResourcePlanRequest.thrift_spec), None, ), # 1 ) - def __init__(self, req=None,): - self.req = req + def __init__(self, request=None,): + self.request = request def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38561,8 +39893,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.req = GetFileMetadataByExprRequest() - self.req.read(iprot) + self.request = WMGetResourcePlanRequest() + self.request.read(iprot) else: iprot.skip(ftype) else: @@ -38574,10 +39906,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_file_metadata_by_expr_args') - if self.req is not None: - oprot.writeFieldBegin('req', TType.STRUCT, 1) - self.req.write(oprot) + oprot.writeStructBegin('get_resource_plan_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38588,7 +39920,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.req) + value = (value * 31) ^ hash(self.request) return value def __repr__(self): @@ -38602,18 +39934,24 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_file_metadata_by_expr_result: +class get_resource_plan_result: """ Attributes: - success + - o1 + - o2 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GetFileMetadataByExprResult, GetFileMetadataByExprResult.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (WMGetResourcePlanResponse, WMGetResourcePlanResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None,): + def __init__(self, success=None, o1=None, o2=None,): self.success = success + self.o1 = o1 + self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38626,10 +39964,22 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = GetFileMetadataByExprResult() + self.success = WMGetResourcePlanResponse() self.success.read(iprot) else: iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -38639,11 +39989,19 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_file_metadata_by_expr_result') + oprot.writeStructBegin('get_resource_plan_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) 
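Note: get_resource_plan_result pairs a WMGetResourcePlanResponse with NoSuchObjectException and MetaException. A usage sketch; WMGetResourcePlanRequest is defined in ttypes.py outside this hunk, so the resourcePlanName field name is an assumption, and `client` is the connected ThriftHiveMetastore.Client from the sketch above.

    from hive_metastore.ttypes import WMGetResourcePlanRequest, NoSuchObjectException

    def fetch_plan(client, name):
        req = WMGetResourcePlanRequest(resourcePlanName=name)   # field name assumed
        try:
            return client.get_resource_plan(req)                # WMGetResourcePlanResponse
        except NoSuchObjectException:
            return None                                         # no plan with that name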
oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38654,6 +40012,8 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -38667,19 +40027,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_file_metadata_args: +class get_active_resource_plan_args: """ Attributes: - - req + - request """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'req', (GetFileMetadataRequest, GetFileMetadataRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (WMGetActiveResourcePlanRequest, WMGetActiveResourcePlanRequest.thrift_spec), None, ), # 1 ) - def __init__(self, req=None,): - self.req = req + def __init__(self, request=None,): + self.request = request def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38692,8 +40052,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.req = GetFileMetadataRequest() - self.req.read(iprot) + self.request = WMGetActiveResourcePlanRequest() + self.request.read(iprot) else: iprot.skip(ftype) else: @@ -38705,10 +40065,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_file_metadata_args') - if self.req is not None: - oprot.writeFieldBegin('req', TType.STRUCT, 1) - self.req.write(oprot) + oprot.writeStructBegin('get_active_resource_plan_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38719,7 +40079,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.req) + value = (value * 31) ^ hash(self.request) return value def __repr__(self): @@ -38733,18 +40093,21 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_file_metadata_result: +class get_active_resource_plan_result: """ Attributes: - success + - o2 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GetFileMetadataResult, GetFileMetadataResult.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (WMGetActiveResourcePlanResponse, WMGetActiveResourcePlanResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None,): + def __init__(self, success=None, o2=None,): self.success = success + self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38757,10 +40120,16 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = GetFileMetadataResult() + self.success = WMGetActiveResourcePlanResponse() self.success.read(iprot) else: iprot.skip(ftype) + elif fid == 1: + 
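Note: get_active_resource_plan_result pairs its response with a single MetaException slot (o2). A sketch; the resourcePlan field on WMGetActiveResourcePlanResponse is an assumption, since that type lives in ttypes.py.

    from hive_metastore.ttypes import WMGetActiveResourcePlanRequest

    def active_plan(client):
        resp = client.get_active_resource_plan(WMGetActiveResourcePlanRequest())
        return resp.resourcePlan   # field name assumed; expected to be unset when nothing is active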
if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -38770,11 +40139,15 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_file_metadata_result') + oprot.writeStructBegin('get_active_resource_plan_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 1) + self.o2.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38785,6 +40158,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -38798,19 +40172,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class put_file_metadata_args: +class get_all_resource_plans_args: """ Attributes: - - req + - request """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'req', (PutFileMetadataRequest, PutFileMetadataRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (WMGetAllResourcePlanRequest, WMGetAllResourcePlanRequest.thrift_spec), None, ), # 1 ) - def __init__(self, req=None,): - self.req = req + def __init__(self, request=None,): + self.request = request def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38823,8 +40197,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.req = PutFileMetadataRequest() - self.req.read(iprot) + self.request = WMGetAllResourcePlanRequest() + self.request.read(iprot) else: iprot.skip(ftype) else: @@ -38836,10 +40210,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('put_file_metadata_args') - if self.req is not None: - oprot.writeFieldBegin('req', TType.STRUCT, 1) - self.req.write(oprot) + oprot.writeStructBegin('get_all_resource_plans_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38850,7 +40224,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.req) + value = (value * 31) ^ hash(self.request) return value def __repr__(self): @@ -38864,18 +40238,21 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class put_file_metadata_result: +class get_all_resource_plans_result: """ Attributes: - success + - o1 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (PutFileMetadataResult, PutFileMetadataResult.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (WMGetAllResourcePlanResponse, WMGetAllResourcePlanResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None,): + def __init__(self, success=None, o1=None,): self.success = success + self.o1 = o1 def 
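Note: get_all_resource_plans follows the same request/response wrapping, with a MetaException slot on the result. A listing sketch; the resourcePlans field on the response and the name/status fields on WMResourcePlan are assumptions, as those types live in ttypes.py.

    from hive_metastore.ttypes import WMGetAllResourcePlanRequest

    def list_plans(client):
        resp = client.get_all_resource_plans(WMGetAllResourcePlanRequest())
        for plan in (resp.resourcePlans or []):             # field name assumed
            print('%s [%s]' % (plan.name, plan.status))     # field names assumed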
read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38888,10 +40265,16 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = PutFileMetadataResult() + self.success = WMGetAllResourcePlanResponse() self.success.read(iprot) else: iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -38901,11 +40284,15 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('put_file_metadata_result') + oprot.writeStructBegin('get_all_resource_plans_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38916,6 +40303,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -38929,19 +40317,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class clear_file_metadata_args: +class alter_resource_plan_args: """ Attributes: - - req + - request """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'req', (ClearFileMetadataRequest, ClearFileMetadataRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (WMAlterResourcePlanRequest, WMAlterResourcePlanRequest.thrift_spec), None, ), # 1 ) - def __init__(self, req=None,): - self.req = req + def __init__(self, request=None,): + self.request = request def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38954,8 +40342,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.req = ClearFileMetadataRequest() - self.req.read(iprot) + self.request = WMAlterResourcePlanRequest() + self.request.read(iprot) else: iprot.skip(ftype) else: @@ -38967,10 +40355,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('clear_file_metadata_args') - if self.req is not None: - oprot.writeFieldBegin('req', TType.STRUCT, 1) - self.req.write(oprot) + oprot.writeStructBegin('alter_resource_plan_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38981,7 +40369,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.req) + value = (value * 31) ^ hash(self.request) return value def __repr__(self): @@ -38995,18 +40383,27 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class clear_file_metadata_result: +class alter_resource_plan_result: """ Attributes: - success + - o1 + 
- o2 + - o3 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (ClearFileMetadataResult, ClearFileMetadataResult.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (WMAlterResourcePlanResponse, WMAlterResourcePlanResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None,): + def __init__(self, success=None, o1=None, o2=None, o3=None,): self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -39019,10 +40416,28 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = ClearFileMetadataResult() + self.success = WMAlterResourcePlanResponse() self.success.read(iprot) else: iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -39032,11 +40447,23 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('clear_file_metadata_result') + oprot.writeStructBegin('alter_resource_plan_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -39047,6 +40474,9 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -39060,19 +40490,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class cache_file_metadata_args: +class validate_resource_plan_args: """ Attributes: - - req + - request """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'req', (CacheFileMetadataRequest, CacheFileMetadataRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (WMValidateResourcePlanRequest, WMValidateResourcePlanRequest.thrift_spec), None, ), # 1 ) - def __init__(self, req=None,): - self.req = req + def __init__(self, request=None,): + self.request = request def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not 
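Note: alter_resource_plan_result distinguishes a missing plan (NoSuchObjectException) from an illegal transition (InvalidOperationException) and a general MetaException. A sketch; the WMAlterResourcePlanRequest field names are assumptions.

    from hive_metastore.ttypes import (WMAlterResourcePlanRequest, WMResourcePlan,
                                       NoSuchObjectException, InvalidOperationException)

    def rename_plan(client, old_name, new_name):
        req = WMAlterResourcePlanRequest(resourcePlanName=old_name,            # field names assumed
                                         resourcePlan=WMResourcePlan(name=new_name))
        try:
            client.alter_resource_plan(req)
        except NoSuchObjectException:
            print('no resource plan named %s' % old_name)
        except InvalidOperationException as e:
            print('plan cannot be altered in its current state: %s' % e)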
None: @@ -39085,8 +40515,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.req = CacheFileMetadataRequest() - self.req.read(iprot) + self.request = WMValidateResourcePlanRequest() + self.request.read(iprot) else: iprot.skip(ftype) else: @@ -39098,10 +40528,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('cache_file_metadata_args') - if self.req is not None: - oprot.writeFieldBegin('req', TType.STRUCT, 1) - self.req.write(oprot) + oprot.writeStructBegin('validate_resource_plan_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -39112,7 +40542,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.req) + value = (value * 31) ^ hash(self.request) return value def __repr__(self): @@ -39126,18 +40556,24 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class cache_file_metadata_result: +class validate_resource_plan_result: """ Attributes: - success + - o1 + - o2 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (CacheFileMetadataResult, CacheFileMetadataResult.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (WMValidateResourcePlanResponse, WMValidateResourcePlanResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None,): + def __init__(self, success=None, o1=None, o2=None,): self.success = success + self.o1 = o1 + self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -39150,10 +40586,22 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = CacheFileMetadataResult() + self.success = WMValidateResourcePlanResponse() self.success.read(iprot) else: iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -39163,11 +40611,19 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('cache_file_metadata_result') + oprot.writeStructBegin('validate_resource_plan_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -39178,6 +40634,8 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ 
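Note: validate_resource_plan_result reports NoSuchObjectException and MetaException next to its response. A sketch that simply surfaces whatever the response carries, since WMValidateResourcePlanResponse is defined outside this hunk.

    from hive_metastore.ttypes import WMValidateResourcePlanRequest

    def validate_plan(client, name):
        resp = client.validate_resource_plan(
            WMValidateResourcePlanRequest(resourcePlanName=name))   # field name assumed
        print(resp)   # the generated __repr__ prints the validation fields of the response
        return resp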
hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -39191,11 +40649,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_metastore_db_uuid_args: +class drop_resource_plan_args: + """ + Attributes: + - request + """ thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'request', (WMDropResourcePlanRequest, WMDropResourcePlanRequest.thrift_spec), None, ), # 1 ) + def __init__(self, request=None,): + self.request = request + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -39205,6 +40672,12 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMDropResourcePlanRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -39214,7 +40687,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_metastore_db_uuid_args') + oprot.writeStructBegin('drop_resource_plan_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -39224,6 +40701,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.request) return value def __repr__(self): @@ -39237,21 +40715,27 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_metastore_db_uuid_result: +class drop_resource_plan_result: """ Attributes: - success - o1 + - o2 + - o3 """ thrift_spec = ( - (0, TType.STRING, 'success', None, None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (0, TType.STRUCT, 'success', (WMDropResourcePlanResponse, WMDropResourcePlanResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None, o1=None,): + def __init__(self, success=None, o1=None, o2=None, o3=None,): self.success = success self.o1 = o1 + self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -39263,16 +40747,29 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRING: - self.success = iprot.readString() + if ftype == TType.STRUCT: + self.success = WMDropResourcePlanResponse() + self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.o1 = MetaException() + self.o1 = NoSuchObjectException() self.o1.read(iprot) else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException() + self.o2.read(iprot) + 
else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -39282,15 +40779,23 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_metastore_db_uuid_result') + oprot.writeStructBegin('drop_resource_plan_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRING, 0) - oprot.writeString(self.success) + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -39302,6 +40807,8 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -39315,7 +40822,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class create_resource_plan_args: +class create_wm_trigger_args: """ Attributes: - request @@ -39323,7 +40830,7 @@ class create_resource_plan_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (WMCreateResourcePlanRequest, WMCreateResourcePlanRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (WMCreateTriggerRequest, WMCreateTriggerRequest.thrift_spec), None, ), # 1 ) def __init__(self, request=None,): @@ -39340,7 +40847,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.request = WMCreateResourcePlanRequest() + self.request = WMCreateTriggerRequest() self.request.read(iprot) else: iprot.skip(ftype) @@ -39353,7 +40860,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('create_resource_plan_args') + oprot.writeStructBegin('create_wm_trigger_args') if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 1) self.request.write(oprot) @@ -39381,27 +40888,30 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class create_resource_plan_result: +class create_wm_trigger_result: """ Attributes: - success - o1 - o2 - o3 + - o4 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (WMCreateResourcePlanResponse, WMCreateResourcePlanResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (WMCreateTriggerResponse, WMCreateTriggerResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (AlreadyExistsException, AlreadyExistsException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 2 - (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 + (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', 
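Note: drop_resource_plan_result mirrors the alter call with NoSuchObjectException, InvalidOperationException and MetaException. Sketch, with the request field name assumed.

    from hive_metastore.ttypes import WMDropResourcePlanRequest, InvalidOperationException

    def drop_plan(client, name):
        try:
            client.drop_resource_plan(
                WMDropResourcePlanRequest(resourcePlanName=name))   # field name assumed
        except InvalidOperationException as e:
            print('refusing to drop %s: %s' % (name, e))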
(InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 3 + (4, TType.STRUCT, 'o4', (MetaException, MetaException.thrift_spec), None, ), # 4 ) - def __init__(self, success=None, o1=None, o2=None, o3=None,): + def __init__(self, success=None, o1=None, o2=None, o3=None, o4=None,): self.success = success self.o1 = o1 self.o2 = o2 self.o3 = o3 + self.o4 = o4 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -39414,7 +40924,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = WMCreateResourcePlanResponse() + self.success = WMCreateTriggerResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -39426,16 +40936,22 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = InvalidObjectException() + self.o2 = NoSuchObjectException() self.o2.read(iprot) else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT: - self.o3 = MetaException() + self.o3 = InvalidObjectException() self.o3.read(iprot) else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = MetaException() + self.o4.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -39445,7 +40961,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('create_resource_plan_result') + oprot.writeStructBegin('create_wm_trigger_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -39462,6 +40978,10 @@ def write(self, oprot): oprot.writeFieldBegin('o3', TType.STRUCT, 3) self.o3.write(oprot) oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin('o4', TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -39475,6 +40995,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.o1) value = (value * 31) ^ hash(self.o2) value = (value * 31) ^ hash(self.o3) + value = (value * 31) ^ hash(self.o4) return value def __repr__(self): @@ -39488,7 +41009,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_resource_plan_args: +class alter_wm_trigger_args: """ Attributes: - request @@ -39496,7 +41017,7 @@ class get_resource_plan_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (WMGetResourcePlanRequest, WMGetResourcePlanRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (WMAlterTriggerRequest, WMAlterTriggerRequest.thrift_spec), None, ), # 1 ) def __init__(self, request=None,): @@ -39513,7 +41034,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.request = WMGetResourcePlanRequest() + self.request = WMAlterTriggerRequest() self.request.read(iprot) else: iprot.skip(ftype) @@ -39526,7 +41047,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_resource_plan_args') + oprot.writeStructBegin('alter_wm_trigger_args') if self.request is not None: oprot.writeFieldBegin('request', 
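Note: create_wm_trigger_result carries four exception slots (AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException). A sketch of creating a trigger; WMTrigger / WMCreateTriggerRequest, their field names and the trigger/action expression syntax are all assumptions for illustration.

    from hive_metastore.ttypes import WMTrigger, WMCreateTriggerRequest

    def add_kill_trigger(client, plan_name):
        trigger = WMTrigger(resourcePlanName=plan_name,               # field names assumed
                            triggerName='slow_query',
                            triggerExpression='ELAPSED_TIME > 100000',
                            actionExpression='KILL_QUERY')
        client.create_wm_trigger(WMCreateTriggerRequest(trigger=trigger))  # field name assumed
        return trigger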
TType.STRUCT, 1) self.request.write(oprot) @@ -39554,24 +41075,27 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_resource_plan_result: +class alter_wm_trigger_result: """ Attributes: - success - o1 - o2 + - o3 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (WMGetResourcePlanResponse, WMGetResourcePlanResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (WMAlterTriggerResponse, WMAlterTriggerResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + (2, TType.STRUCT, 'o2', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None, o1=None, o2=None,): + def __init__(self, success=None, o1=None, o2=None, o3=None,): self.success = success self.o1 = o1 self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -39584,7 +41108,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = WMGetResourcePlanResponse() + self.success = WMAlterTriggerResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -39596,10 +41120,16 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = MetaException() + self.o2 = InvalidObjectException() self.o2.read(iprot) else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -39609,7 +41139,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_resource_plan_result') + oprot.writeStructBegin('alter_wm_trigger_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -39622,6 +41152,10 @@ def write(self, oprot): oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -39634,6 +41168,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -39647,7 +41182,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_active_resource_plan_args: +class drop_wm_trigger_args: """ Attributes: - request @@ -39655,7 +41190,7 @@ class get_active_resource_plan_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (WMGetActiveResourcePlanRequest, WMGetActiveResourcePlanRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (WMDropTriggerRequest, WMDropTriggerRequest.thrift_spec), None, ), # 1 ) def __init__(self, request=None,): @@ -39672,7 +41207,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.request = 
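Note: alter_wm_trigger_result uses NoSuchObjectException, InvalidObjectException and MetaException. A follow-up to the create sketch above; field names and the action syntax are again assumptions.

    from hive_metastore.ttypes import WMAlterTriggerRequest

    def retarget_trigger(client, trigger, pool_path):
        trigger.actionExpression = 'MOVE TO ' + pool_path                 # action syntax assumed
        client.alter_wm_trigger(WMAlterTriggerRequest(trigger=trigger))   # field name assumed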
WMGetActiveResourcePlanRequest() + self.request = WMDropTriggerRequest() self.request.read(iprot) else: iprot.skip(ftype) @@ -39685,7 +41220,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_active_resource_plan_args') + oprot.writeStructBegin('drop_wm_trigger_args') if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 1) self.request.write(oprot) @@ -39713,21 +41248,27 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_active_resource_plan_result: +class drop_wm_trigger_result: """ Attributes: - success + - o1 - o2 + - o3 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (WMGetActiveResourcePlanResponse, WMGetActiveResourcePlanResponse.thrift_spec), None, ), # 0 - (1, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 1 + (0, TType.STRUCT, 'success', (WMDropTriggerResponse, WMDropTriggerResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None, o2=None,): + def __init__(self, success=None, o1=None, o2=None, o3=None,): self.success = success + self.o1 = o1 self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -39740,159 +41281,26 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = WMGetActiveResourcePlanResponse() + self.success = WMDropTriggerResponse() self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.o2 = MetaException() - self.o2.read(iprot) - else: - iprot.skip(ftype) - else: - iprot.skip(ftype) - iprot.readFieldEnd() - iprot.readStructEnd() - - def write(self, oprot): - if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: - oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) - return - oprot.writeStructBegin('get_active_resource_plan_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) - oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 1) - self.o2.write(oprot) - oprot.writeFieldEnd() - oprot.writeFieldStop() - oprot.writeStructEnd() - - def validate(self): - return - - - def __hash__(self): - value = 17 - value = (value * 31) ^ hash(self.success) - value = (value * 31) ^ hash(self.o2) - return value - - def __repr__(self): - L = ['%s=%r' % (key, value) - for key, value in self.__dict__.iteritems()] - return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) - - def __eq__(self, other): - return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ - - def __ne__(self, other): - return not (self == other) - -class get_all_resource_plans_args: - """ - Attributes: - - request - """ - - thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'request', (WMGetAllResourcePlanRequest, 
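Note: drop_wm_trigger_result distinguishes NoSuchObjectException, InvalidOperationException and MetaException. Sketch, field names assumed.

    from hive_metastore.ttypes import WMDropTriggerRequest, NoSuchObjectException

    def drop_trigger(client, plan_name, trigger_name):
        try:
            client.drop_wm_trigger(WMDropTriggerRequest(resourcePlanName=plan_name,   # assumed
                                                        triggerName=trigger_name))
        except NoSuchObjectException:
            pass   # already gone; treat the drop as idempotent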
WMGetAllResourcePlanRequest.thrift_spec), None, ), # 1 - ) - - def __init__(self, request=None,): - self.request = request - - def read(self, iprot): - if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: - fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) - return - iprot.readStructBegin() - while True: - (fname, ftype, fid) = iprot.readFieldBegin() - if ftype == TType.STOP: - break - if fid == 1: - if ftype == TType.STRUCT: - self.request = WMGetAllResourcePlanRequest() - self.request.read(iprot) + self.o1 = NoSuchObjectException() + self.o1.read(iprot) else: iprot.skip(ftype) - else: - iprot.skip(ftype) - iprot.readFieldEnd() - iprot.readStructEnd() - - def write(self, oprot): - if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: - oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) - return - oprot.writeStructBegin('get_all_resource_plans_args') - if self.request is not None: - oprot.writeFieldBegin('request', TType.STRUCT, 1) - self.request.write(oprot) - oprot.writeFieldEnd() - oprot.writeFieldStop() - oprot.writeStructEnd() - - def validate(self): - return - - - def __hash__(self): - value = 17 - value = (value * 31) ^ hash(self.request) - return value - - def __repr__(self): - L = ['%s=%r' % (key, value) - for key, value in self.__dict__.iteritems()] - return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) - - def __eq__(self, other): - return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ - - def __ne__(self, other): - return not (self == other) - -class get_all_resource_plans_result: - """ - Attributes: - - success - - o1 - """ - - thrift_spec = ( - (0, TType.STRUCT, 'success', (WMGetAllResourcePlanResponse, WMGetAllResourcePlanResponse.thrift_spec), None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 - ) - - def __init__(self, success=None, o1=None,): - self.success = success - self.o1 = o1 - - def read(self, iprot): - if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: - fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) - return - iprot.readStructBegin() - while True: - (fname, ftype, fid) = iprot.readFieldBegin() - if ftype == TType.STOP: - break - if fid == 0: + elif fid == 2: if ftype == TType.STRUCT: - self.success = WMGetAllResourcePlanResponse() - self.success.read(iprot) + self.o2 = InvalidOperationException() + self.o2.read(iprot) else: iprot.skip(ftype) - elif fid == 1: + elif fid == 3: if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) + self.o3 = MetaException() + self.o3.read(iprot) else: iprot.skip(ftype) else: @@ -39904,7 +41312,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_all_resource_plans_result') + oprot.writeStructBegin('drop_wm_trigger_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -39913,6 +41321,14 @@ def write(self, oprot): 
oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -39924,6 +41340,8 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -39937,7 +41355,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class alter_resource_plan_args: +class get_triggers_for_resourceplan_args: """ Attributes: - request @@ -39945,7 +41363,7 @@ class alter_resource_plan_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (WMAlterResourcePlanRequest, WMAlterResourcePlanRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (WMGetTriggersForResourePlanRequest, WMGetTriggersForResourePlanRequest.thrift_spec), None, ), # 1 ) def __init__(self, request=None,): @@ -39962,7 +41380,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.request = WMAlterResourcePlanRequest() + self.request = WMGetTriggersForResourePlanRequest() self.request.read(iprot) else: iprot.skip(ftype) @@ -39975,7 +41393,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('alter_resource_plan_args') + oprot.writeStructBegin('get_triggers_for_resourceplan_args') if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 1) self.request.write(oprot) @@ -40003,27 +41421,24 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class alter_resource_plan_result: +class get_triggers_for_resourceplan_result: """ Attributes: - success - o1 - o2 - - o3 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (WMAlterResourcePlanResponse, WMAlterResourcePlanResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (WMGetTriggersForResourePlanResponse, WMGetTriggersForResourePlanResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 - (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None, o1=None, o2=None, o3=None,): + def __init__(self, success=None, o1=None, o2=None,): self.success = success self.o1 = o1 self.o2 = o2 - self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -40036,7 +41451,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = WMAlterResourcePlanResponse() + self.success = WMGetTriggersForResourePlanResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -40048,16 +41463,10 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = 
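Note: get_triggers_for_resourceplan returns the triggers attached to one plan; the ResourePlan spelling in the request/response type names comes straight from the IDL, so it should not be "corrected" here. Listing sketch, with the request and response field names assumed.

    from hive_metastore.ttypes import WMGetTriggersForResourePlanRequest

    def list_triggers(client, plan_name):
        resp = client.get_triggers_for_resourceplan(
            WMGetTriggersForResourePlanRequest(resourcePlanName=plan_name))   # field assumed
        for t in (resp.triggers or []):                                       # field assumed
            print('%s: %s -> %s' % (t.triggerName, t.triggerExpression, t.actionExpression))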
InvalidOperationException() + self.o2 = MetaException() self.o2.read(iprot) else: iprot.skip(ftype) - elif fid == 3: - if ftype == TType.STRUCT: - self.o3 = MetaException() - self.o3.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -40067,7 +41476,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('alter_resource_plan_result') + oprot.writeStructBegin('get_triggers_for_resourceplan_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -40080,10 +41489,6 @@ def write(self, oprot): oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() - if self.o3 is not None: - oprot.writeFieldBegin('o3', TType.STRUCT, 3) - self.o3.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -40096,7 +41501,6 @@ def __hash__(self): value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) value = (value * 31) ^ hash(self.o2) - value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -40110,7 +41514,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class validate_resource_plan_args: +class create_or_update_wm_pool_args: """ Attributes: - request @@ -40118,7 +41522,7 @@ class validate_resource_plan_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (WMValidateResourcePlanRequest, WMValidateResourcePlanRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (WMCreateOrUpdatePoolRequest, WMCreateOrUpdatePoolRequest.thrift_spec), None, ), # 1 ) def __init__(self, request=None,): @@ -40135,7 +41539,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.request = WMValidateResourcePlanRequest() + self.request = WMCreateOrUpdatePoolRequest() self.request.read(iprot) else: iprot.skip(ftype) @@ -40148,7 +41552,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('validate_resource_plan_args') + oprot.writeStructBegin('create_or_update_wm_pool_args') if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 1) self.request.write(oprot) @@ -40176,24 +41580,30 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class validate_resource_plan_result: +class create_or_update_wm_pool_result: """ Attributes: - success - o1 - o2 + - o3 + - o4 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (WMValidateResourcePlanResponse, WMValidateResourcePlanResponse.thrift_spec), None, ), # 0 - (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + (0, TType.STRUCT, 'success', (WMCreateOrUpdatePoolResponse, WMCreateOrUpdatePoolResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (AlreadyExistsException, AlreadyExistsException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 3 + 
(4, TType.STRUCT, 'o4', (MetaException, MetaException.thrift_spec), None, ), # 4 ) - def __init__(self, success=None, o1=None, o2=None,): + def __init__(self, success=None, o1=None, o2=None, o3=None, o4=None,): self.success = success self.o1 = o1 self.o2 = o2 + self.o3 = o3 + self.o4 = o4 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -40206,22 +41616,34 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = WMValidateResourcePlanResponse() + self.success = WMCreateOrUpdatePoolResponse() self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.o1 = NoSuchObjectException() + self.o1 = AlreadyExistsException() self.o1.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = MetaException() + self.o2 = NoSuchObjectException() self.o2.read(iprot) else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = InvalidObjectException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = MetaException() + self.o4.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -40231,7 +41653,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('validate_resource_plan_result') + oprot.writeStructBegin('create_or_update_wm_pool_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -40244,6 +41666,14 @@ def write(self, oprot): oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin('o4', TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -40256,6 +41686,8 @@ def __hash__(self): value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) + value = (value * 31) ^ hash(self.o4) return value def __repr__(self): @@ -40269,7 +41701,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class drop_resource_plan_args: +class drop_wm_pool_args: """ Attributes: - request @@ -40277,7 +41709,7 @@ class drop_resource_plan_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (WMDropResourcePlanRequest, WMDropResourcePlanRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (WMDropPoolRequest, WMDropPoolRequest.thrift_spec), None, ), # 1 ) def __init__(self, request=None,): @@ -40294,7 +41726,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.request = WMDropResourcePlanRequest() + self.request = WMDropPoolRequest() self.request.read(iprot) else: iprot.skip(ftype) @@ -40307,7 +41739,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - 
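Note: create_or_update_wm_pool folds create and alter into one RPC, which is why its result lists both AlreadyExistsException and NoSuchObjectException next to InvalidObjectException and MetaException. Sketch; WMPool / WMCreateOrUpdatePoolRequest field names are assumptions.

    from hive_metastore.ttypes import WMPool, WMCreateOrUpdatePoolRequest

    def define_pool(client, plan_name, path, fraction, parallelism, update=False):
        pool = WMPool(resourcePlanName=plan_name, poolPath=path)     # field names assumed
        pool.allocFraction = fraction
        pool.queryParallelism = parallelism
        client.create_or_update_wm_pool(
            WMCreateOrUpdatePoolRequest(pool=pool, update=update))   # field names assumed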
oprot.writeStructBegin('drop_resource_plan_args') + oprot.writeStructBegin('drop_wm_pool_args') if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 1) self.request.write(oprot) @@ -40335,7 +41767,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class drop_resource_plan_result: +class drop_wm_pool_result: """ Attributes: - success @@ -40345,7 +41777,7 @@ class drop_resource_plan_result: """ thrift_spec = ( - (0, TType.STRUCT, 'success', (WMDropResourcePlanResponse, WMDropResourcePlanResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (WMDropPoolResponse, WMDropPoolResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 @@ -40368,7 +41800,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = WMDropResourcePlanResponse() + self.success = WMDropPoolResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -40399,7 +41831,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('drop_resource_plan_result') + oprot.writeStructBegin('drop_wm_pool_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -40442,7 +41874,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class create_wm_trigger_args: +class create_or_update_wm_mapping_args: """ Attributes: - request @@ -40450,7 +41882,7 @@ class create_wm_trigger_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (WMCreateTriggerRequest, WMCreateTriggerRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (WMCreateOrUpdateMappingRequest, WMCreateOrUpdateMappingRequest.thrift_spec), None, ), # 1 ) def __init__(self, request=None,): @@ -40467,7 +41899,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.request = WMCreateTriggerRequest() + self.request = WMCreateOrUpdateMappingRequest() self.request.read(iprot) else: iprot.skip(ftype) @@ -40480,7 +41912,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('create_wm_trigger_args') + oprot.writeStructBegin('create_or_update_wm_mapping_args') if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 1) self.request.write(oprot) @@ -40508,7 +41940,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class create_wm_trigger_result: +class create_or_update_wm_mapping_result: """ Attributes: - success @@ -40519,7 +41951,7 @@ class create_wm_trigger_result: """ thrift_spec = ( - (0, TType.STRUCT, 'success', (WMCreateTriggerResponse, WMCreateTriggerResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (WMCreateOrUpdateMappingResponse, WMCreateOrUpdateMappingResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (AlreadyExistsException, AlreadyExistsException.thrift_spec), None, ), # 1 (2, TType.STRUCT, 
'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2 (3, TType.STRUCT, 'o3', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 3 @@ -40544,7 +41976,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = WMCreateTriggerResponse() + self.success = WMCreateOrUpdateMappingResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -40581,7 +42013,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('create_wm_trigger_result') + oprot.writeStructBegin('create_or_update_wm_mapping_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -40629,7 +42061,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class alter_wm_trigger_args: +class drop_wm_mapping_args: """ Attributes: - request @@ -40637,7 +42069,7 @@ class alter_wm_trigger_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (WMAlterTriggerRequest, WMAlterTriggerRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (WMDropMappingRequest, WMDropMappingRequest.thrift_spec), None, ), # 1 ) def __init__(self, request=None,): @@ -40654,7 +42086,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.request = WMAlterTriggerRequest() + self.request = WMDropMappingRequest() self.request.read(iprot) else: iprot.skip(ftype) @@ -40667,7 +42099,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('alter_wm_trigger_args') + oprot.writeStructBegin('drop_wm_mapping_args') if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 1) self.request.write(oprot) @@ -40695,7 +42127,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class alter_wm_trigger_result: +class drop_wm_mapping_result: """ Attributes: - success @@ -40705,9 +42137,9 @@ class alter_wm_trigger_result: """ thrift_spec = ( - (0, TType.STRUCT, 'success', (WMAlterTriggerResponse, WMAlterTriggerResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (WMDropMappingResponse, WMDropMappingResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 2 + (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 ) @@ -40728,7 +42160,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = WMAlterTriggerResponse() + self.success = WMDropMappingResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -40740,7 +42172,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = InvalidObjectException() + self.o2 = InvalidOperationException() self.o2.read(iprot) else: iprot.skip(ftype) @@ -40759,7 +42191,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is 
not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('alter_wm_trigger_result') + oprot.writeStructBegin('drop_wm_mapping_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -40802,7 +42234,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class drop_wm_trigger_args: +class create_or_drop_wm_trigger_to_pool_mapping_args: """ Attributes: - request @@ -40810,7 +42242,7 @@ class drop_wm_trigger_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (WMDropTriggerRequest, WMDropTriggerRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (WMCreateOrDropTriggerToPoolMappingRequest, WMCreateOrDropTriggerToPoolMappingRequest.thrift_spec), None, ), # 1 ) def __init__(self, request=None,): @@ -40827,7 +42259,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.request = WMDropTriggerRequest() + self.request = WMCreateOrDropTriggerToPoolMappingRequest() self.request.read(iprot) else: iprot.skip(ftype) @@ -40840,7 +42272,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('drop_wm_trigger_args') + oprot.writeStructBegin('create_or_drop_wm_trigger_to_pool_mapping_args') if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 1) self.request.write(oprot) @@ -40868,27 +42300,30 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class drop_wm_trigger_result: +class create_or_drop_wm_trigger_to_pool_mapping_result: """ Attributes: - success - o1 - o2 - o3 + - o4 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (WMDropTriggerResponse, WMDropTriggerResponse.thrift_spec), None, ), # 0 - (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 - (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 + (0, TType.STRUCT, 'success', (WMCreateOrDropTriggerToPoolMappingResponse, WMCreateOrDropTriggerToPoolMappingResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (AlreadyExistsException, AlreadyExistsException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 3 + (4, TType.STRUCT, 'o4', (MetaException, MetaException.thrift_spec), None, ), # 4 ) - def __init__(self, success=None, o1=None, o2=None, o3=None,): + def __init__(self, success=None, o1=None, o2=None, o3=None, o4=None,): self.success = success self.o1 = o1 self.o2 = o2 self.o3 = o3 + self.o4 = o4 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -40901,28 +42336,34 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = WMDropTriggerResponse() + self.success = WMCreateOrDropTriggerToPoolMappingResponse() self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: - self.o1 = 
NoSuchObjectException() + self.o1 = AlreadyExistsException() self.o1.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = InvalidOperationException() + self.o2 = NoSuchObjectException() self.o2.read(iprot) else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT: - self.o3 = MetaException() + self.o3 = InvalidObjectException() self.o3.read(iprot) else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = MetaException() + self.o4.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -40932,7 +42373,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('drop_wm_trigger_result') + oprot.writeStructBegin('create_or_drop_wm_trigger_to_pool_mapping_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -40949,165 +42390,9 @@ def write(self, oprot): oprot.writeFieldBegin('o3', TType.STRUCT, 3) self.o3.write(oprot) oprot.writeFieldEnd() - oprot.writeFieldStop() - oprot.writeStructEnd() - - def validate(self): - return - - - def __hash__(self): - value = 17 - value = (value * 31) ^ hash(self.success) - value = (value * 31) ^ hash(self.o1) - value = (value * 31) ^ hash(self.o2) - value = (value * 31) ^ hash(self.o3) - return value - - def __repr__(self): - L = ['%s=%r' % (key, value) - for key, value in self.__dict__.iteritems()] - return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) - - def __eq__(self, other): - return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ - - def __ne__(self, other): - return not (self == other) - -class get_triggers_for_resourceplan_args: - """ - Attributes: - - request - """ - - thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'request', (WMGetTriggersForResourePlanRequest, WMGetTriggersForResourePlanRequest.thrift_spec), None, ), # 1 - ) - - def __init__(self, request=None,): - self.request = request - - def read(self, iprot): - if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: - fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) - return - iprot.readStructBegin() - while True: - (fname, ftype, fid) = iprot.readFieldBegin() - if ftype == TType.STOP: - break - if fid == 1: - if ftype == TType.STRUCT: - self.request = WMGetTriggersForResourePlanRequest() - self.request.read(iprot) - else: - iprot.skip(ftype) - else: - iprot.skip(ftype) - iprot.readFieldEnd() - iprot.readStructEnd() - - def write(self, oprot): - if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: - oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) - return - oprot.writeStructBegin('get_triggers_for_resourceplan_args') - if self.request is not None: - oprot.writeFieldBegin('request', TType.STRUCT, 1) - self.request.write(oprot) - oprot.writeFieldEnd() - oprot.writeFieldStop() - oprot.writeStructEnd() - - def validate(self): - return - - - def __hash__(self): - value = 17 - value = (value * 31) ^ hash(self.request) - return value - - def __repr__(self): - L = ['%s=%r' % (key, value) - for key, value in self.__dict__.iteritems()] - return 
'%s(%s)' % (self.__class__.__name__, ', '.join(L)) - - def __eq__(self, other): - return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ - - def __ne__(self, other): - return not (self == other) - -class get_triggers_for_resourceplan_result: - """ - Attributes: - - success - - o1 - - o2 - """ - - thrift_spec = ( - (0, TType.STRUCT, 'success', (WMGetTriggersForResourePlanResponse, WMGetTriggersForResourePlanResponse.thrift_spec), None, ), # 0 - (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 - ) - - def __init__(self, success=None, o1=None, o2=None,): - self.success = success - self.o1 = o1 - self.o2 = o2 - - def read(self, iprot): - if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: - fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) - return - iprot.readStructBegin() - while True: - (fname, ftype, fid) = iprot.readFieldBegin() - if ftype == TType.STOP: - break - if fid == 0: - if ftype == TType.STRUCT: - self.success = WMGetTriggersForResourePlanResponse() - self.success.read(iprot) - else: - iprot.skip(ftype) - elif fid == 1: - if ftype == TType.STRUCT: - self.o1 = NoSuchObjectException() - self.o1.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRUCT: - self.o2 = MetaException() - self.o2.read(iprot) - else: - iprot.skip(ftype) - else: - iprot.skip(ftype) - iprot.readFieldEnd() - iprot.readStructEnd() - - def write(self, oprot): - if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: - oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) - return - oprot.writeStructBegin('get_triggers_for_resourceplan_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) - oprot.writeFieldEnd() - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 2) - self.o2.write(oprot) + if self.o4 is not None: + oprot.writeFieldBegin('o4', TType.STRUCT, 4) + self.o4.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -41121,6 +42406,8 @@ def __hash__(self): value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) + value = (value * 31) ^ hash(self.o4) return value def __repr__(self): diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 7d7d28d824..3b7555c638 100644 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -14763,7 +14763,7 @@ class WMMapping: - resourcePlanName - entityType - entityName - - poolName + - poolPath - ordering """ @@ -14772,15 +14772,15 @@ class WMMapping: (1, TType.STRING, 'resourcePlanName', None, None, ), # 1 (2, TType.STRING, 'entityType', None, None, ), # 2 (3, TType.STRING, 'entityName', None, None, ), # 3 - (4, TType.STRING, 'poolName', None, None, ), # 4 + (4, TType.STRING, 'poolPath', None, None, ), # 4 (5, TType.I32, 
'ordering', None, None, ), # 5 ) - def __init__(self, resourcePlanName=None, entityType=None, entityName=None, poolName=None, ordering=None,): + def __init__(self, resourcePlanName=None, entityType=None, entityName=None, poolPath=None, ordering=None,): self.resourcePlanName = resourcePlanName self.entityType = entityType self.entityName = entityName - self.poolName = poolName + self.poolPath = poolPath self.ordering = ordering def read(self, iprot): @@ -14809,7 +14809,7 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: - self.poolName = iprot.readString() + self.poolPath = iprot.readString() else: iprot.skip(ftype) elif fid == 5: @@ -14839,9 +14839,9 @@ def write(self, oprot): oprot.writeFieldBegin('entityName', TType.STRING, 3) oprot.writeString(self.entityName) oprot.writeFieldEnd() - if self.poolName is not None: - oprot.writeFieldBegin('poolName', TType.STRING, 4) - oprot.writeString(self.poolName) + if self.poolPath is not None: + oprot.writeFieldBegin('poolPath', TType.STRING, 4) + oprot.writeString(self.poolPath) oprot.writeFieldEnd() if self.ordering is not None: oprot.writeFieldBegin('ordering', TType.I32, 5) @@ -14865,7 +14865,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.resourcePlanName) value = (value * 31) ^ hash(self.entityType) value = (value * 31) ^ hash(self.entityName) - value = (value * 31) ^ hash(self.poolName) + value = (value * 31) ^ hash(self.poolPath) value = (value * 31) ^ hash(self.ordering) return value @@ -16481,6 +16481,655 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class WMCreateOrUpdatePoolRequest: + """ + Attributes: + - pool + - newPoolPath + - update + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'pool', (WMPool, WMPool.thrift_spec), None, ), # 1 + (2, TType.STRING, 'newPoolPath', None, None, ), # 2 + (3, TType.BOOL, 'update', None, None, ), # 3 + ) + + def __init__(self, pool=None, newPoolPath=None, update=None,): + self.pool = pool + self.newPoolPath = newPoolPath + self.update = update + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.pool = WMPool() + self.pool.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.newPoolPath = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.update = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('WMCreateOrUpdatePoolRequest') + if self.pool is not None: + oprot.writeFieldBegin('pool', TType.STRUCT, 1) + self.pool.write(oprot) + oprot.writeFieldEnd() + if self.newPoolPath is not None: + oprot.writeFieldBegin('newPoolPath', TType.STRING, 2) + oprot.writeString(self.newPoolPath) + oprot.writeFieldEnd() + if self.update is not None: + oprot.writeFieldBegin('update', TType.BOOL, 
3) + oprot.writeBool(self.update) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.pool) + value = (value * 31) ^ hash(self.newPoolPath) + value = (value * 31) ^ hash(self.update) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class WMCreateOrUpdatePoolResponse: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('WMCreateOrUpdatePoolResponse') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class WMDropPoolRequest: + """ + Attributes: + - resourcePlanName + - poolPath + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'resourcePlanName', None, None, ), # 1 + (2, TType.STRING, 'poolPath', None, None, ), # 2 + ) + + def __init__(self, resourcePlanName=None, poolPath=None,): + self.resourcePlanName = resourcePlanName + self.poolPath = poolPath + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.resourcePlanName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.poolPath = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('WMDropPoolRequest') + if self.resourcePlanName is not None: + oprot.writeFieldBegin('resourcePlanName', TType.STRING, 1) + oprot.writeString(self.resourcePlanName) + oprot.writeFieldEnd() + 
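The WMCreateOrUpdatePoolRequest generated above (fields: pool, newPoolPath, update) is the payload for the new create_or_update_wm_pool service call. A minimal client-side sketch, assuming an already-connected ThriftHiveMetastore.Client named client, and assuming WMPool exposes resourcePlanName/poolPath/allocFraction/queryParallelism keyword fields:

from hive_metastore.ttypes import WMPool, WMCreateOrUpdatePoolRequest

# Hypothetical values; 'client' is assumed to be an open ThriftHiveMetastore.Client.
pool = WMPool(resourcePlanName='rp', poolPath='etl.daily',
              allocFraction=0.5, queryParallelism=4)
# update=False creates the pool; update=True alters it, optionally renaming via newPoolPath.
req = WMCreateOrUpdatePoolRequest(pool=pool, newPoolPath=None, update=False)
client.create_or_update_wm_pool(req)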
if self.poolPath is not None: + oprot.writeFieldBegin('poolPath', TType.STRING, 2) + oprot.writeString(self.poolPath) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.resourcePlanName) + value = (value * 31) ^ hash(self.poolPath) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class WMDropPoolResponse: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('WMDropPoolResponse') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class WMCreateOrUpdateMappingRequest: + """ + Attributes: + - mapping + - update + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'mapping', (WMMapping, WMMapping.thrift_spec), None, ), # 1 + (2, TType.BOOL, 'update', None, None, ), # 2 + ) + + def __init__(self, mapping=None, update=None,): + self.mapping = mapping + self.update = update + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.mapping = WMMapping() + self.mapping.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.update = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('WMCreateOrUpdateMappingRequest') + if self.mapping is not None: + oprot.writeFieldBegin('mapping', TType.STRUCT, 1) + self.mapping.write(oprot) + oprot.writeFieldEnd() 
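WMDropPoolRequest above identifies a pool by resource plan name and pool path, and the WMCreateOrUpdateMappingRequest that follows wraps a WMMapping (whose poolName field is renamed to poolPath earlier in this file). A hedged sketch of both calls, reusing the assumed client from the previous example; 'USER' and 'etl_user' are assumed values used only for illustration:

from hive_metastore.ttypes import WMMapping, WMCreateOrUpdateMappingRequest, WMDropPoolRequest

# Route queries from an assumed user 'etl_user' to the 'etl.daily' pool of plan 'rp'.
mapping = WMMapping(resourcePlanName='rp', entityType='USER', entityName='etl_user',
                    poolPath='etl.daily', ordering=0)
client.create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest(mapping=mapping, update=False))

# Drop the pool once no mappings reference it.
client.drop_wm_pool(WMDropPoolRequest(resourcePlanName='rp', poolPath='etl.daily'))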
+ if self.update is not None: + oprot.writeFieldBegin('update', TType.BOOL, 2) + oprot.writeBool(self.update) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.mapping) + value = (value * 31) ^ hash(self.update) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class WMCreateOrUpdateMappingResponse: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('WMCreateOrUpdateMappingResponse') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class WMDropMappingRequest: + """ + Attributes: + - mapping + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'mapping', (WMMapping, WMMapping.thrift_spec), None, ), # 1 + ) + + def __init__(self, mapping=None,): + self.mapping = mapping + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.mapping = WMMapping() + self.mapping.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('WMDropMappingRequest') + if self.mapping is not None: + oprot.writeFieldBegin('mapping', TType.STRUCT, 1) + self.mapping.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.mapping) + return value + + def __repr__(self): + L = 
['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class WMDropMappingResponse: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('WMDropMappingResponse') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class WMCreateOrDropTriggerToPoolMappingRequest: + """ + Attributes: + - resourcePlanName + - triggerName + - poolPath + - drop + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'resourcePlanName', None, None, ), # 1 + (2, TType.STRING, 'triggerName', None, None, ), # 2 + (3, TType.STRING, 'poolPath', None, None, ), # 3 + (4, TType.BOOL, 'drop', None, None, ), # 4 + ) + + def __init__(self, resourcePlanName=None, triggerName=None, poolPath=None, drop=None,): + self.resourcePlanName = resourcePlanName + self.triggerName = triggerName + self.poolPath = poolPath + self.drop = drop + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.resourcePlanName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.triggerName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.poolPath = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.drop = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('WMCreateOrDropTriggerToPoolMappingRequest') + if self.resourcePlanName is not None: + 
oprot.writeFieldBegin('resourcePlanName', TType.STRING, 1) + oprot.writeString(self.resourcePlanName) + oprot.writeFieldEnd() + if self.triggerName is not None: + oprot.writeFieldBegin('triggerName', TType.STRING, 2) + oprot.writeString(self.triggerName) + oprot.writeFieldEnd() + if self.poolPath is not None: + oprot.writeFieldBegin('poolPath', TType.STRING, 3) + oprot.writeString(self.poolPath) + oprot.writeFieldEnd() + if self.drop is not None: + oprot.writeFieldBegin('drop', TType.BOOL, 4) + oprot.writeBool(self.drop) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.resourcePlanName) + value = (value * 31) ^ hash(self.triggerName) + value = (value * 31) ^ hash(self.poolPath) + value = (value * 31) ^ hash(self.drop) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class WMCreateOrDropTriggerToPoolMappingResponse: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('WMCreateOrDropTriggerToPoolMappingResponse') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class MetaException(TException): """ Attributes: diff --git standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index 192f881294..73c69f026f 100644 --- standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -3334,14 +3334,14 @@ class WMMapping RESOURCEPLANNAME = 1 ENTITYTYPE = 2 ENTITYNAME = 3 - POOLNAME = 4 + POOLPATH = 4 ORDERING = 5 FIELDS = { RESOURCEPLANNAME => {:type => ::Thrift::Types::STRING, :name => 'resourcePlanName'}, ENTITYTYPE => {:type => ::Thrift::Types::STRING, :name => 'entityType'}, ENTITYNAME => {:type => ::Thrift::Types::STRING, :name => 'entityName'}, - POOLNAME => {:type => ::Thrift::Types::STRING, :name => 'poolName', :optional => true}, + POOLPATH => {:type => ::Thrift::Types::STRING, :name => 'poolPath', :optional => true}, ORDERING => {:type => ::Thrift::Types::I32, :name => 
'ordering', :optional => true} } @@ -3753,6 +3753,175 @@ class WMGetTriggersForResourePlanResponse ::Thrift::Struct.generate_accessors self end +class WMCreateOrUpdatePoolRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + POOL = 1 + NEWPOOLPATH = 2 + UPDATE = 3 + + FIELDS = { + POOL => {:type => ::Thrift::Types::STRUCT, :name => 'pool', :class => ::WMPool, :optional => true}, + NEWPOOLPATH => {:type => ::Thrift::Types::STRING, :name => 'newPoolPath', :optional => true}, + UPDATE => {:type => ::Thrift::Types::BOOL, :name => 'update', :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class WMCreateOrUpdatePoolResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class WMDropPoolRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + RESOURCEPLANNAME = 1 + POOLPATH = 2 + + FIELDS = { + RESOURCEPLANNAME => {:type => ::Thrift::Types::STRING, :name => 'resourcePlanName', :optional => true}, + POOLPATH => {:type => ::Thrift::Types::STRING, :name => 'poolPath', :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class WMDropPoolResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class WMCreateOrUpdateMappingRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + MAPPING = 1 + UPDATE = 2 + + FIELDS = { + MAPPING => {:type => ::Thrift::Types::STRUCT, :name => 'mapping', :class => ::WMMapping, :optional => true}, + UPDATE => {:type => ::Thrift::Types::BOOL, :name => 'update', :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class WMCreateOrUpdateMappingResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class WMDropMappingRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + MAPPING = 1 + + FIELDS = { + MAPPING => {:type => ::Thrift::Types::STRUCT, :name => 'mapping', :class => ::WMMapping, :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class WMDropMappingResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class WMCreateOrDropTriggerToPoolMappingRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + RESOURCEPLANNAME = 1 + TRIGGERNAME = 2 + POOLPATH = 3 + DROP = 4 + + FIELDS = { + RESOURCEPLANNAME => {:type => ::Thrift::Types::STRING, :name => 'resourcePlanName', :optional => true}, + TRIGGERNAME => {:type => ::Thrift::Types::STRING, :name => 'triggerName', :optional => true}, + POOLPATH => {:type => ::Thrift::Types::STRING, :name => 'poolPath', :optional => true}, + DROP => {:type => ::Thrift::Types::BOOL, :name => 'drop', :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class WMCreateOrDropTriggerToPoolMappingResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def 
struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + class MetaException < ::Thrift::Exception include ::Thrift::Struct, ::Thrift::Struct_Union def initialize(message=nil) diff --git standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 9fd70458b3..d415863356 100644 --- standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -2885,6 +2885,99 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_triggers_for_resourceplan failed: unknown result') end + def create_or_update_wm_pool(request) + send_create_or_update_wm_pool(request) + return recv_create_or_update_wm_pool() + end + + def send_create_or_update_wm_pool(request) + send_message('create_or_update_wm_pool', Create_or_update_wm_pool_args, :request => request) + end + + def recv_create_or_update_wm_pool() + result = receive_message(Create_or_update_wm_pool_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? + raise result.o4 unless result.o4.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'create_or_update_wm_pool failed: unknown result') + end + + def drop_wm_pool(request) + send_drop_wm_pool(request) + return recv_drop_wm_pool() + end + + def send_drop_wm_pool(request) + send_message('drop_wm_pool', Drop_wm_pool_args, :request => request) + end + + def recv_drop_wm_pool() + result = receive_message(Drop_wm_pool_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_wm_pool failed: unknown result') + end + + def create_or_update_wm_mapping(request) + send_create_or_update_wm_mapping(request) + return recv_create_or_update_wm_mapping() + end + + def send_create_or_update_wm_mapping(request) + send_message('create_or_update_wm_mapping', Create_or_update_wm_mapping_args, :request => request) + end + + def recv_create_or_update_wm_mapping() + result = receive_message(Create_or_update_wm_mapping_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? + raise result.o4 unless result.o4.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'create_or_update_wm_mapping failed: unknown result') + end + + def drop_wm_mapping(request) + send_drop_wm_mapping(request) + return recv_drop_wm_mapping() + end + + def send_drop_wm_mapping(request) + send_message('drop_wm_mapping', Drop_wm_mapping_args, :request => request) + end + + def recv_drop_wm_mapping() + result = receive_message(Drop_wm_mapping_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? 
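Each of the new recv_* stubs maps the declared o1..o4 result fields back onto client-side exceptions, so callers see the same AlreadyExistsException / NoSuchObjectException / InvalidObjectException (or InvalidOperationException) / MetaException set as the existing WM calls. A hedged Python sketch of handling them for the trigger-to-pool mapping call; 'client' and the request values are assumed as in the earlier examples:

from hive_metastore.ttypes import (AlreadyExistsException, NoSuchObjectException,
                                   InvalidObjectException, MetaException,
                                   WMCreateOrDropTriggerToPoolMappingRequest)

req = WMCreateOrDropTriggerToPoolMappingRequest(resourcePlanName='rp', triggerName='t1',
                                                poolPath='etl.daily', drop=False)
try:
    client.create_or_drop_wm_trigger_to_pool_mapping(req)
except NoSuchObjectException as e:
    # The plan, trigger, or pool named in the request does not exist.
    print('missing object: %s' % e)
except (AlreadyExistsException, InvalidObjectException, MetaException):
    raise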
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_wm_mapping failed: unknown result') + end + + def create_or_drop_wm_trigger_to_pool_mapping(request) + send_create_or_drop_wm_trigger_to_pool_mapping(request) + return recv_create_or_drop_wm_trigger_to_pool_mapping() + end + + def send_create_or_drop_wm_trigger_to_pool_mapping(request) + send_message('create_or_drop_wm_trigger_to_pool_mapping', Create_or_drop_wm_trigger_to_pool_mapping_args, :request => request) + end + + def recv_create_or_drop_wm_trigger_to_pool_mapping() + result = receive_message(Create_or_drop_wm_trigger_to_pool_mapping_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? + raise result.o4 unless result.o4.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'create_or_drop_wm_trigger_to_pool_mapping failed: unknown result') + end + end class Processor < ::FacebookService::Processor @@ -5029,6 +5122,87 @@ module ThriftHiveMetastore write_result(result, oprot, 'get_triggers_for_resourceplan', seqid) end + def process_create_or_update_wm_pool(seqid, iprot, oprot) + args = read_args(iprot, Create_or_update_wm_pool_args) + result = Create_or_update_wm_pool_result.new() + begin + result.success = @handler.create_or_update_wm_pool(args.request) + rescue ::AlreadyExistsException => o1 + result.o1 = o1 + rescue ::NoSuchObjectException => o2 + result.o2 = o2 + rescue ::InvalidObjectException => o3 + result.o3 = o3 + rescue ::MetaException => o4 + result.o4 = o4 + end + write_result(result, oprot, 'create_or_update_wm_pool', seqid) + end + + def process_drop_wm_pool(seqid, iprot, oprot) + args = read_args(iprot, Drop_wm_pool_args) + result = Drop_wm_pool_result.new() + begin + result.success = @handler.drop_wm_pool(args.request) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::InvalidOperationException => o2 + result.o2 = o2 + rescue ::MetaException => o3 + result.o3 = o3 + end + write_result(result, oprot, 'drop_wm_pool', seqid) + end + + def process_create_or_update_wm_mapping(seqid, iprot, oprot) + args = read_args(iprot, Create_or_update_wm_mapping_args) + result = Create_or_update_wm_mapping_result.new() + begin + result.success = @handler.create_or_update_wm_mapping(args.request) + rescue ::AlreadyExistsException => o1 + result.o1 = o1 + rescue ::NoSuchObjectException => o2 + result.o2 = o2 + rescue ::InvalidObjectException => o3 + result.o3 = o3 + rescue ::MetaException => o4 + result.o4 = o4 + end + write_result(result, oprot, 'create_or_update_wm_mapping', seqid) + end + + def process_drop_wm_mapping(seqid, iprot, oprot) + args = read_args(iprot, Drop_wm_mapping_args) + result = Drop_wm_mapping_result.new() + begin + result.success = @handler.drop_wm_mapping(args.request) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::InvalidOperationException => o2 + result.o2 = o2 + rescue ::MetaException => o3 + result.o3 = o3 + end + write_result(result, oprot, 'drop_wm_mapping', seqid) + end + + def process_create_or_drop_wm_trigger_to_pool_mapping(seqid, iprot, oprot) + args = read_args(iprot, Create_or_drop_wm_trigger_to_pool_mapping_args) + result = Create_or_drop_wm_trigger_to_pool_mapping_result.new() + begin + result.success = @handler.create_or_drop_wm_trigger_to_pool_mapping(args.request) + rescue ::AlreadyExistsException => o1 + result.o1 = o1 + rescue 
::NoSuchObjectException => o2 + result.o2 = o2 + rescue ::InvalidObjectException => o3 + result.o3 = o3 + rescue ::MetaException => o4 + result.o4 = o4 + end + write_result(result, oprot, 'create_or_drop_wm_trigger_to_pool_mapping', seqid) + end + end # HELPER FUNCTIONS AND STRUCTURES @@ -11490,5 +11664,201 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Create_or_update_wm_pool_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQUEST = 1 + + FIELDS = { + REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::WMCreateOrUpdatePoolRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Create_or_update_wm_pool_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + O3 = 3 + O4 = 4 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::WMCreateOrUpdatePoolResponse}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::AlreadyExistsException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::InvalidObjectException}, + O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Drop_wm_pool_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQUEST = 1 + + FIELDS = { + REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::WMDropPoolRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Drop_wm_pool_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + O3 = 3 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::WMDropPoolResponse}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidOperationException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Create_or_update_wm_mapping_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQUEST = 1 + + FIELDS = { + REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::WMCreateOrUpdateMappingRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Create_or_update_wm_mapping_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + O3 = 3 + O4 = 4 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::WMCreateOrUpdateMappingResponse}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::AlreadyExistsException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::InvalidObjectException}, + O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class 
Drop_wm_mapping_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQUEST = 1 + + FIELDS = { + REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::WMDropMappingRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Drop_wm_mapping_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + O3 = 3 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::WMDropMappingResponse}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidOperationException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Create_or_drop_wm_trigger_to_pool_mapping_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQUEST = 1 + + FIELDS = { + REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::WMCreateOrDropTriggerToPoolMappingRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Create_or_drop_wm_trigger_to_pool_mapping_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + O3 = 3 + O4 = 4 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::WMCreateOrDropTriggerToPoolMappingResponse}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::AlreadyExistsException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::InvalidObjectException}, + O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + end diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index db60ff29a8..9af0c6ae5f 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -73,7 +73,10 @@ import org.apache.hadoop.hive.metastore.model.MNotificationNextId; import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics; import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics; +import org.apache.hadoop.hive.metastore.model.MWMMapping; +import org.apache.hadoop.hive.metastore.model.MWMPool; import org.apache.hadoop.hive.metastore.model.MWMResourcePlan; +import org.apache.hadoop.hive.metastore.model.MWMTrigger; import org.apache.hadoop.hive.metastore.parser.ExpressionTree; import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder; import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode; @@ -226,6 +229,9 @@ private boolean ensureDbInit() { initQueries.add(pm.newQuery(MNotificationLog.class, "dbName == ''")); initQueries.add(pm.newQuery(MNotificationNextId.class, "nextEventId < -1")); initQueries.add(pm.newQuery(MWMResourcePlan.class, "name == ''")); + initQueries.add(pm.newQuery(MWMTrigger.class, "name == ''")); + 
initQueries.add(pm.newQuery(MWMPool.class, "path == ''")); + initQueries.add(pm.newQuery(MWMMapping.class, "entityType == ''")); Query q; while ((q = initQueries.peekFirst()) != null) { q.execute(); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index de7dc2d9d8..2fbb371ae5 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.metastore.api.WMPoolTrigger; import org.apache.hadoop.hive.metastore.api.WMMapping; import org.apache.hadoop.hive.metastore.model.MWMMapping; +import org.apache.hadoop.hive.metastore.model.MWMMapping.EntityType; import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.model.MWMPool; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; @@ -9528,20 +9529,26 @@ private void checkForConstraintException(Exception e, String msg) throws Already @Override public void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize) - throws AlreadyExistsException, MetaException { + throws AlreadyExistsException, InvalidObjectException, MetaException { boolean commited = false; String rpName = normalizeIdentifier(resourcePlan.getName()); Integer queryParallelism = resourcePlan.isSetQueryParallelism() ? resourcePlan.getQueryParallelism() : null; MWMResourcePlan rp = new MWMResourcePlan( rpName, queryParallelism, MWMResourcePlan.Status.DISABLED); + if (rpName.isEmpty()) { + throw new InvalidObjectException("Resource name cannot be empty."); + } + if (queryParallelism != null && queryParallelism <= 0) { + throw new InvalidObjectException("Query parallelism should be positive."); + } try { openTransaction(); pm.makePersistent(rp); // TODO: ideally, this should be moved outside to HiveMetaStore to be shared between // all the RawStore-s. Right now there's no method to create a pool. 
if (defaultPoolSize > 0) { - MWMPool defaultPool = new MWMPool(rp, "default", null, 1.0, defaultPoolSize, null); + MWMPool defaultPool = new MWMPool(rp, "default", 1.0, defaultPoolSize, null); pm.makePersistent(defaultPool); rp.setPools(Sets.newHashSet(defaultPool)); rp.setDefaultPool(defaultPool); @@ -9609,7 +9616,7 @@ private WMMapping fromMMapping(MWMMapping mMapping, String rpName) { WMMapping result = new WMMapping(rpName, mMapping.getEntityType().toString(), mMapping.getEntityName()); if (mMapping.getPool() != null) { - result.setPoolName(mMapping.getPool().getPath()); + result.setPoolPath(mMapping.getPool().getPath()); } if (mMapping.getOrdering() != null) { result.setOrdering(mMapping.getOrdering()); @@ -9622,7 +9629,7 @@ public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException return fromMResourcePlan(getMWMResourcePlan(name)); } - public MWMResourcePlan getMWMResourcePlan(String name) throws NoSuchObjectException { + private MWMResourcePlan getMWMResourcePlan(String name) throws NoSuchObjectException { MWMResourcePlan resourcePlan; boolean commited = false; Query query = null; @@ -9689,9 +9696,16 @@ public WMFullResourcePlan alterResourcePlan( throw new NoSuchObjectException("Cannot find resource plan: " + name); } if (!resourcePlan.getName().equals(name)) { + String newName = normalizeIdentifier(resourcePlan.getName()); + if (newName.isEmpty()) { + throw new InvalidOperationException("Cannot rename to empty value."); + } mResourcePlan.setName(resourcePlan.getName()); } if (resourcePlan.isSetQueryParallelism()) { + if (resourcePlan.getQueryParallelism() <= 0) { + throw new InvalidOperationException("queryParallelism should be positive."); + } mResourcePlan.setQueryParallelism(resourcePlan.getQueryParallelism()); } if (resourcePlan.isSetStatus()) { @@ -9940,6 +9954,24 @@ public void alterWMTrigger(WMTrigger trigger) } } + + private MWMTrigger getTrigger(MWMResourcePlan resourcePlan, String triggerName) { + boolean commited = false; + Query query = null; + try { + openTransaction(); + // Get the MWMTrigger object from DN + query = pm.newQuery(MWMTrigger.class, "resourcePlan == rp && name == triggerName"); + query.declareParameters("MWMResourcePlan rp, java.lang.String triggerName"); + query.setUnique(true); + MWMTrigger mTrigger = (MWMTrigger) query.execute(resourcePlan, triggerName); + pm.retrieve(mTrigger); + commited = commitTransaction(); + return mTrigger; + } finally { + rollbackAndCleanup(commited, query); + } + } + @Override public void dropWMTrigger(String resourcePlanName, String triggerName) throws NoSuchObjectException, InvalidOperationException, MetaException { @@ -9956,7 +9988,7 @@ public void dropWMTrigger(String resourcePlanName, String triggerName) } query = pm.newQuery(MWMTrigger.class, "resourcePlan == rp && name == triggerName"); query.declareParameters("MWMResourcePlan rp, java.lang.String triggerName"); - if (query.deletePersistentAll(resourcePlan, triggerName) == 0) { + if (query.deletePersistentAll(resourcePlan, triggerName) != 1) { throw new NoSuchObjectException("Cannot delete trigger: " + triggerName); } commited = commitTransaction(); @@ -9998,4 +10030,287 @@ private WMTrigger fromMWMTrigger(MWMTrigger mTrigger, String resourcePlanName) { trigger.setActionExpression(mTrigger.getActionExpression()); return trigger; } + + @Override + public void createOrUpdatePool(WMPool pool, String newPoolPath, boolean update) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, + MetaException { + if (!update && 
newPoolPath != null) { + throw new InvalidOperationException("Unexpected move while creating pool."); + } + boolean commited = false; + Query query = null; + try { + openTransaction(); + MWMResourcePlan resourcePlan = getMWMResourcePlan( + normalizeIdentifier(pool.getResourcePlanName())); + if (resourcePlan.getStatus() != MWMResourcePlan.Status.DISABLED) { + throw new InvalidOperationException("Resource plan must be disabled to edit it."); + } + + // Get the existing MWMPool object, if any, from DN + query = pm.newQuery(MWMPool.class, "resourcePlan == rp && path == poolPath"); + query.declareParameters("MWMResourcePlan rp, java.lang.String poolPath"); + query.setUnique(true); + MWMPool mPool = (MWMPool) query.execute(resourcePlan, pool.getPoolPath()); + + if (!update) { + if (mPool != null) { + throw new AlreadyExistsException("Pool with path: " + pool.getPoolPath() + + " already exists in resource plan: " + pool.getResourcePlanName()); + } + if (!poolParentExists(resourcePlan, pool.getPoolPath())) { + throw new NoSuchObjectException("Pool path is invalid, the parent does not exist"); + } + mPool = new MWMPool(resourcePlan, pool.getPoolPath(), pool.getAllocFraction(), + pool.getQueryParallelism(), pool.getSchedulingPolicy()); + pm.makePersistent(mPool); + } else { + if (mPool == null) { + throw new NoSuchObjectException("Cannot find pool with path: " + pool.getPoolPath() + + " in resource plan: " + pool.getResourcePlanName()); + } + pm.retrieve(mPool); + if (pool.isSetAllocFraction()) { + mPool.setAllocFraction(pool.getAllocFraction()); + } + if (pool.isSetQueryParallelism()) { + mPool.setQueryParallelism(pool.getQueryParallelism()); + } + if (pool.isSetSchedulingPolicy()) { + mPool.setSchedulingPolicy(pool.getSchedulingPolicy()); + } + if (newPoolPath != null && !pool.getPoolPath().equals(newPoolPath)) { + moveDescendents(resourcePlan, mPool.getPath(), newPoolPath); + mPool.setPath(newPoolPath); + } + } + commited = commitTransaction(); + } finally { + rollbackAndCleanup(commited, query); + } + } + + private MWMPool getPool(MWMResourcePlan resourcePlan, String poolPath) { + boolean commited = false; + Query query = null; + try { + openTransaction(); + query = pm.newQuery(MWMPool.class, "resourcePlan == rp && path == poolPath"); + query.declareParameters("MWMResourcePlan rp, java.lang.String poolPath"); + query.setUnique(true); + MWMPool mPool = (MWMPool) query.execute(resourcePlan, poolPath); + commited = commitTransaction(); + return mPool; + } finally { + rollbackAndCleanup(commited, query); + } + } + + // Rewrite the paths of all pools under 'path' so they move along with the renamed pool. + private void moveDescendents(MWMResourcePlan resourcePlan, String path, String newPoolPath) + throws NoSuchObjectException { + if (!poolParentExists(resourcePlan, newPoolPath)) { + throw new NoSuchObjectException("Pool path is invalid, the parent does not exist"); + } + boolean commited = false; + Query query = null; + openTransaction(); + try { + query = pm.newQuery(MWMPool.class, "resourcePlan == rp && path.startsWith(poolPath)"); + query.declareParameters("MWMResourcePlan rp, java.lang.String poolPath"); + List<MWMPool> descPools = (List<MWMPool>) query.execute(resourcePlan, path + "."); + pm.retrieveAll(descPools); + for (MWMPool pool : descPools) { + pool.setPath(newPoolPath + pool.getPath().substring(path.length())); + } + commited = commitTransaction(); + } finally { + rollbackAndCleanup(commited, query); + } + } + + private boolean poolParentExists(MWMResourcePlan resourcePlan, String poolPath) { + int idx = poolPath.lastIndexOf('.'); + if (idx == -1) { + return true; + } + String parent = poolPath.substring(0, 
idx); + boolean commited = false; + Query query = null; + openTransaction(); + try { + query = pm.newQuery(MWMPool.class, "resourcePlan == rp && path == poolPath"); + query.declareParameters("MWMResourcePlan rp, java.lang.String poolPath"); + query.setUnique(true); + MWMPool pool = (MWMPool) query.execute(resourcePlan, parent); + pm.retrieve(pool); + commited = commitTransaction(); + return pool != null; + } finally { + rollbackAndCleanup(commited, query); + } + } + + @Override + public void dropWMPool(String resourcePlanName, String poolPath) + throws NoSuchObjectException, InvalidOperationException, MetaException { + resourcePlanName = normalizeIdentifier(resourcePlanName); + poolPath = normalizeIdentifier(poolPath); + + boolean commited = false; + Query query = null; + try { + openTransaction(); + MWMResourcePlan resourcePlan = getMWMResourcePlan(resourcePlanName); + if (resourcePlan.getStatus() != MWMResourcePlan.Status.DISABLED) { + throw new InvalidOperationException("Resource plan must be disabled to edit it."); + } + if (poolHasChildren(resourcePlan, poolPath)) { + throw new InvalidOperationException("Pool has children, cannot drop."); + } + query = pm.newQuery(MWMPool.class, "resourcePlan == rp && path == poolPath"); + query.declareParameters("MWMResourcePlan rp, java.lang.String poolPath"); + if (query.deletePersistentAll(resourcePlan, poolPath) != 1) { + throw new NoSuchObjectException("Cannot delete pool: " + poolPath); + } + commited = commitTransaction(); + } finally { + rollbackAndCleanup(commited, query); + } + } + + // Returns true if any pool exists directly or transitively under poolPath. + private boolean poolHasChildren(MWMResourcePlan resourcePlan, String poolPath) { + boolean commited = false; + Query query = null; + try { + openTransaction(); + query = pm.newQuery(MWMPool.class, "resourcePlan == rp && path.startsWith(poolPath)"); + query.declareParameters("MWMResourcePlan rp, java.lang.String poolPath"); + query.setResult("count(this)"); + query.setUnique(true); + Long count = (Long) query.execute(resourcePlan, poolPath + "."); + commited = commitTransaction(); + return count != null && count > 0; + } finally { + rollbackAndCleanup(commited, query); + } + } + + @Override + public void createOrUpdateWMMapping(WMMapping mapping, boolean update) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, + MetaException { + String resourcePlanName = normalizeIdentifier(mapping.getResourcePlanName()); + EntityType entityType = EntityType.valueOf(mapping.getEntityType().trim().toUpperCase()); + String entityName = normalizeIdentifier(mapping.getEntityName()); + boolean commited = false; + Query query = null; + try { + openTransaction(); + MWMResourcePlan resourcePlan = getMWMResourcePlan(resourcePlanName); + if (resourcePlan.getStatus() != MWMResourcePlan.Status.DISABLED) { + throw new InvalidOperationException("Resource plan must be disabled to edit it."); + } + MWMPool pool = getPool(resourcePlan, mapping.getPoolPath()); + if (!update) { + MWMMapping mMapping = new MWMMapping(resourcePlan, entityType, entityName, pool, + mapping.getOrdering()); + pm.makePersistent(mMapping); + } else { + query = pm.newQuery(MWMMapping.class, "resourcePlan == rp && entityType == type " + + "&& entityName == name"); + query.declareParameters( + "MWMResourcePlan rp, java.lang.String type, java.lang.String name"); + query.setUnique(true); + MWMMapping mMapping = (MWMMapping) query.execute(resourcePlan, entityType.toString(), entityName); + mMapping.setPool(pool); + } + commited = commitTransaction(); + } finally { + rollbackAndCleanup(commited, 
query); + } + } + + @Override + public void dropWMMapping(WMMapping mapping) + throws NoSuchObjectException, InvalidOperationException, MetaException { + String resourcePlanName = normalizeIdentifier(mapping.getResourcePlanName()); + String entityType = mapping.getEntityType().trim().toUpperCase(); + String entityName = normalizeIdentifier(mapping.getEntityName()); + boolean commited = false; + Query query = null; + try { + openTransaction(); + MWMResourcePlan resourcePlan = getMWMResourcePlan(resourcePlanName); + if (resourcePlan.getStatus() != MWMResourcePlan.Status.DISABLED) { + throw new InvalidOperationException("Resource plan must be disabled to edit it."); + } + query = pm.newQuery(MWMMapping.class, + "resourcePlan == rp && entityType == type && entityName == name"); + query.declareParameters("MWMResourcePlan rp, java.lang.String type, java.lang.String name"); + if (query.deletePersistentAll(resourcePlan, entityType, entityName) != 1) { + throw new NoSuchObjectException("Cannot delete mapping."); + } + commited = commitTransaction(); + } finally { + rollbackAndCleanup(commited, query); + } + } + + @Override + public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName, + String poolPath) throws AlreadyExistsException, NoSuchObjectException, + InvalidOperationException, MetaException { + resourcePlanName = normalizeIdentifier(resourcePlanName); + triggerName = normalizeIdentifier(triggerName); + poolPath = normalizeIdentifier(poolPath); + boolean commited = false; + try { + openTransaction(); + MWMResourcePlan resourcePlan = getMWMResourcePlan(resourcePlanName); + MWMPool pool = getPool(resourcePlan, poolPath); + MWMTrigger trigger = getTrigger(resourcePlan, triggerName); + if (pool == null) { + throw new NoSuchObjectException("Cannot find pool with path: " + poolPath); + } + if (trigger == null) { + throw new NoSuchObjectException("Cannot find trigger with name: " + triggerName); + } + pool.getTriggers().add(trigger); + trigger.getPools().add(pool); + pm.makePersistent(pool); + pm.makePersistent(trigger); + commited = commitTransaction(); + } finally { + rollbackAndCleanup(commited, (Query)null); + } + } + + @Override + public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName, + String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException { + resourcePlanName = normalizeIdentifier(resourcePlanName); + triggerName = normalizeIdentifier(triggerName); + poolPath = normalizeIdentifier(poolPath); + boolean commited = false; + try { + openTransaction(); + MWMResourcePlan resourcePlan = getMWMResourcePlan(resourcePlanName); + MWMPool pool = getPool(resourcePlan, poolPath); + MWMTrigger trigger = getTrigger(resourcePlan, triggerName); + if (pool == null) { + throw new NoSuchObjectException("Cannot find pool with path: " + poolPath); + } + if (trigger == null) { + throw new NoSuchObjectException("Cannot find trigger with name: " + triggerName); + } + pool.getTriggers().remove(trigger); + trigger.getPools().remove(pool); + pm.makePersistent(pool); + pm.makePersistent(trigger); + commited = commitTransaction(); + } finally { + rollbackAndCleanup(commited, (Query)null); + } + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index db148a1769..4636b168c4 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ 
standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -72,6 +72,8 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -752,7 +754,7 @@ void getFileMetadataByExpr(List fileIds, FileMetadataExprType type, byte[] String getMetastoreDbUuid() throws MetaException; void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize) - throws AlreadyExistsException, MetaException; + throws AlreadyExistsException, MetaException, InvalidObjectException; WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException, MetaException; @@ -782,4 +784,25 @@ void dropWMTrigger(String resourcePlanName, String triggerName) List getTriggersForResourcePlan(String resourcePlanName) throws NoSuchObjectException, MetaException; + + void createOrUpdatePool(WMPool pool, String newPoolPath, boolean update) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, + MetaException; + + void dropWMPool(String resourcePlanName, String poolPath) + throws NoSuchObjectException, InvalidOperationException, MetaException; + + void createOrUpdateWMMapping(WMMapping mapping, boolean update) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, + MetaException; + + void dropWMMapping(WMMapping mapping) + throws NoSuchObjectException, InvalidOperationException, MetaException; + + void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, + MetaException; + + void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath) + throws NoSuchObjectException, InvalidOperationException, MetaException; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index c61f27b326..02c89ab8b4 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -87,6 +87,8 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; @@ -2243,7 +2245,7 @@ void setInitializedForTest() { @Override public void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize) - throws AlreadyExistsException, MetaException { + throws AlreadyExistsException, InvalidObjectException, MetaException { rawStore.createResourcePlan(resourcePlan, defaultPoolSize); } @@ -2305,4 +2307,43 @@ public void dropWMTrigger(String resourcePlanName, String triggerName) throws NoSuchObjectException, MetaException { return 
rawStore.getTriggersForResourcePlan(resourcePlanName); } + + @Override + public void createOrUpdatePool(WMPool pool, String newPoolPath, boolean update) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, + MetaException { + rawStore.createOrUpdatePool(pool, newPoolPath, update); + } + + @Override + public void dropWMPool(String resourcePlanName, String poolPath) + throws NoSuchObjectException, InvalidOperationException, MetaException { + rawStore.dropWMPool(resourcePlanName, poolPath); + } + + @Override + public void createOrUpdateWMMapping(WMMapping mapping, boolean update) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, + MetaException { + rawStore.createOrUpdateWMMapping(mapping, update); + } + + @Override + public void dropWMMapping(WMMapping mapping) + throws NoSuchObjectException, InvalidOperationException, MetaException { + rawStore.dropWMMapping(mapping); + } + + @Override + public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName, + String poolPath) throws AlreadyExistsException, NoSuchObjectException, + InvalidOperationException, MetaException { + rawStore.createWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath); + } + + @Override + public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName, + String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException { + rawStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath); + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java index e00a0206d0..01f2fe30ad 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java @@ -25,14 +25,13 @@ private String path; private Double allocFraction; private Integer queryParallelism; - private Set triggers; private String schedulingPolicy; - private MWMPool parentPool; + private Set triggers; public MWMPool() {} - public MWMPool(MWMResourcePlan resourcePlan, String path, MWMPool parentPool, - Double allocFraction, Integer queryParallelism, String schedulingPolicy) { + public MWMPool(MWMResourcePlan resourcePlan, String path, Double allocFraction, + Integer queryParallelism, String schedulingPolicy) { this.resourcePlan = resourcePlan; this.path = path; this.allocFraction = allocFraction; @@ -72,14 +71,6 @@ public void setQueryParallelism(Integer queryParallelism) { this.queryParallelism = queryParallelism; } - public Set getTriggers() { - return triggers; - } - - public void setTriggers(Set triggers) { - this.triggers = triggers; - } - public String getSchedulingPolicy() { return schedulingPolicy; } @@ -88,11 +79,11 @@ public void setSchedulingPolicy(String schedulingPolicy) { this.schedulingPolicy = schedulingPolicy; } - public MWMPool getParentPool() { - return parentPool; + public Set getTriggers() { + return triggers; } - public void setParentPool(MWMPool parentPool) { - this.parentPool = parentPool; + public void setTriggers(Set triggers) { + this.triggers = triggers; } } diff --git standalone-metastore/src/main/resources/package.jdo standalone-metastore/src/main/resources/package.jdo index 32426303e9..57e75f890d 100644 --- standalone-metastore/src/main/resources/package.jdo +++ standalone-metastore/src/main/resources/package.jdo @@ -1123,19 +1123,17 @@ - - - - - - 
+ + + + diff --git standalone-metastore/src/main/thrift/hive_metastore.thrift standalone-metastore/src/main/thrift/hive_metastore.thrift index 4832a6f75d..5168f2f85f 100644 --- standalone-metastore/src/main/thrift/hive_metastore.thrift +++ standalone-metastore/src/main/thrift/hive_metastore.thrift @@ -1065,7 +1065,7 @@ struct WMMapping { 1: required string resourcePlanName; 2: required string entityType; 3: required string entityName; - 4: optional string poolName; + 4: optional string poolPath; 5: optional i32 ordering; } @@ -1168,6 +1168,47 @@ struct WMGetTriggersForResourePlanResponse { 1: optional list triggers; } +struct WMCreateOrUpdatePoolRequest { + 1: optional WMPool pool; + 2: optional string newPoolPath; + 3: optional bool update; +} + +struct WMCreateOrUpdatePoolResponse { +} + +struct WMDropPoolRequest { + 1: optional string resourcePlanName; + 2: optional string poolPath; +} + +struct WMDropPoolResponse { +} + +struct WMCreateOrUpdateMappingRequest { + 1: optional WMMapping mapping; + 2: optional bool update; +} + +struct WMCreateOrUpdateMappingResponse { +} + +struct WMDropMappingRequest { + 1: optional WMMapping mapping; +} + +struct WMDropMappingResponse { +} + +struct WMCreateOrDropTriggerToPoolMappingRequest { + 1: optional string resourcePlanName; + 2: optional string triggerName; + 3: optional string poolPath; + 4: optional bool drop; +} + +struct WMCreateOrDropTriggerToPoolMappingResponse { +} // Exceptions. @@ -1767,6 +1808,21 @@ service ThriftHiveMetastore extends fb303.FacebookService WMGetTriggersForResourePlanResponse get_triggers_for_resourceplan(1:WMGetTriggersForResourePlanRequest request) throws(1:NoSuchObjectException o1, 2:MetaException o2) + + WMCreateOrUpdatePoolResponse create_or_update_wm_pool(1:WMCreateOrUpdatePoolRequest request) + throws(1:AlreadyExistsException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, 4:MetaException o4) + + WMDropPoolResponse drop_wm_pool(1:WMDropPoolRequest request) + throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3) + + WMCreateOrUpdateMappingResponse create_or_update_wm_mapping(1:WMCreateOrUpdateMappingRequest request) + throws(1:AlreadyExistsException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, 4:MetaException o4) + + WMDropMappingResponse drop_wm_mapping(1:WMDropMappingRequest request) + throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3) + + WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(1:WMCreateOrDropTriggerToPoolMappingRequest request) + throws(1:AlreadyExistsException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, 4:MetaException o4) } // * Note about the DDL_TIME: When creating or altering a table or a partition,