diff --git hcatalog/webhcat/java-client/pom.xml hcatalog/webhcat/java-client/pom.xml index 0c9fc15e63..ea518549df 100644 --- hcatalog/webhcat/java-client/pom.xml +++ hcatalog/webhcat/java-client/pom.xml @@ -91,7 +91,7 @@ ${basedir}/src/main/java - ${basedir}/src/test + ${basedir}/src/test/java org.apache.maven.plugins diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java index 5bd146d1eb..fe7b234ba7 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.InvalidInputException; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; @@ -57,6 +58,7 @@ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; @@ -974,4 +976,36 @@ public void dropConstraint(String dbName, String tableName, public String getMetastoreDbUuid() throws MetaException { throw new MetaException("getMetastoreDbUuid is not implemented"); } + + @Override + public void createResourcePlan(WMResourcePlan resourcePlan) throws MetaException { + objectStore.createResourcePlan(resourcePlan); + } + + @Override + public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException { + return objectStore.getResourcePlan(name); + } + + @Override + public List getAllResourcePlans() throws MetaException { + return objectStore.getAllResourcePlans(); + } + + @Override + public void alterResourcePlan(String name, WMResourcePlan resourcePlan) + throws NoSuchObjectException, InvalidOperationException, MetaException { + objectStore.alterResourcePlan(name, resourcePlan); + } + + @Override + public void validateResourcePlan(String name) + throws NoSuchObjectException, InvalidObjectException, MetaException { + objectStore.validateResourcePlan(name); + } + + @Override + public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException { + objectStore.dropResourcePlan(name); + } } diff --git itests/src/test/resources/testconfiguration.properties itests/src/test/resources/testconfiguration.properties index 8d92da3cbe..b8ac2a3994 100644 --- itests/src/test/resources/testconfiguration.properties +++ itests/src/test/resources/testconfiguration.properties @@ -566,6 +566,7 @@ minillaplocal.query.files=\ ptf.q,\ ptf_streaming.q,\ quotedid_smb.q,\ + resourceplan.q,\ sample10.q,\ schema_evol_orc_acid_part.q,\ schema_evol_orc_acid_part_update.q,\ diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 23033fac81..611f9fd2c7 100644 --- 
metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -7386,6 +7386,71 @@ public String get_metastore_db_uuid() throws MetaException, TException { throw e; } } + + @Override + public void create_resource_plan(WMResourcePlan resourcePlan) + throws InvalidObjectException, MetaException, TException { + try { + getMS().createResourcePlan(resourcePlan); + } catch (MetaException e) { + LOG.error("Exception while trying to persist resource plan", e); + throw e; + } + } + + @Override + public WMResourcePlan get_resource_plan(String name) + throws NoSuchObjectException, MetaException, TException { + try { + return getMS().getResourcePlan(name); + } catch (MetaException e) { + LOG.error("Exception while trying to retrieve resource plan", e); + throw e; + } + } + + @Override + public List get_all_resource_plans() throws MetaException, TException { + try { + return getMS().getAllResourcePlans(); + } catch (MetaException e) { + LOG.error("Exception while trying to retrieve resource plans", e); + throw e; + } + } + + @Override + public void alter_resource_plan(String name, WMResourcePlan resourcePlan) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + try { + getMS().alterResourcePlan(name, resourcePlan); + } catch (MetaException e) { + LOG.error("Exception while trying to alter resource plan", e); + throw e; + } + } + + @Override + public void validate_resource_plan(String name) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + try { + getMS().validateResourcePlan(name); + } catch (MetaException e) { + LOG.error("Exception while trying to validate resource plan", e); + throw e; + } + } + + @Override + public void drop_resource_plan(String name) + throws NoSuchObjectException, MetaException, TException { + try { + getMS().dropResourcePlan(name); + } catch (MetaException e) { + LOG.error("Exception while trying to drop resource plan", e); + throw e; + } + } } public static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, HiveConf hiveConf) diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 3f5f80e1cf..81717f8494 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -2635,4 +2635,40 @@ public boolean cacheFileMetadata( public String getMetastoreDbUuid() throws TException { return client.get_metastore_db_uuid(); } + + @Override + public void createResourcePlan(WMResourcePlan resourcePlan) + throws InvalidObjectException, MetaException, TException { + client.create_resource_plan(resourcePlan); + } + + @Override + public WMResourcePlan getResourcePlan(String resourcePlanName) + throws NoSuchObjectException, MetaException, TException { + return client.get_resource_plan(resourcePlanName); + } + + @Override + public List getAllResourcePlans() + throws NoSuchObjectException, MetaException, TException { + return client.get_all_resource_plans(); + } + + @Override + public void dropResourcePlan(String resourcePlanName) + throws NoSuchObjectException, MetaException, TException { + client.drop_resource_plan(resourcePlanName); + } + + @Override + public void alterResourcePlan(String resourcePlanName, WMResourcePlan resourcePlan) + throws NoSuchObjectException, InvalidObjectException, MetaException,
TException { + client.alter_resource_plan(resourcePlanName, resourcePlan); + } + + @Override + public void validateResourcePlan(String resourcePlanName) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + client.validate_resource_plan(resourcePlanName); + } } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index a08fc722c7..f024219d87 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -86,6 +86,7 @@ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; @@ -1766,4 +1767,21 @@ void addNotNullConstraint(List notNullConstraintCols) thro */ String getMetastoreDbUuid() throws MetaException, TException; + void createResourcePlan(WMResourcePlan resourcePlan) + throws InvalidObjectException, MetaException, TException; + + WMResourcePlan getResourcePlan(String resourcePlanName) + throws NoSuchObjectException, MetaException, TException; + + List getAllResourcePlans() + throws NoSuchObjectException, MetaException, TException; + + void dropResourcePlan(String resourcePlanName) + throws NoSuchObjectException, MetaException, TException; + + void alterResourcePlan(String resourcePlanName, WMResourcePlan resourcePlan) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException; + + void validateResourcePlan(String resourcePlanName) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException; } diff --git metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index a75dbb047c..13479af37f 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.InvalidInputException; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; @@ -52,6 +53,7 @@ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; @@ -933,4 +935,36 @@ public void dropConstraint(String dbName, String tableName, public String getMetastoreDbUuid() throws MetaException { throw new MetaException("Get metastore uuid is not implemented"); } + + @Override + public void createResourcePlan(WMResourcePlan 
resourcePlan) throws MetaException { + objectStore.createResourcePlan(resourcePlan); + } + + @Override + public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException { + return objectStore.getResourcePlan(name); + } + + @Override + public List getAllResourcePlans() throws MetaException { + return objectStore.getAllResourcePlans(); + } + + @Override + public void alterResourcePlan(String name, WMResourcePlan resourcePlan) + throws NoSuchObjectException, InvalidOperationException, MetaException { + objectStore.alterResourcePlan(name, resourcePlan); + } + + @Override + public void validateResourcePlan(String name) + throws NoSuchObjectException, InvalidObjectException, MetaException { + objectStore.validateResourcePlan(name); + } + + @Override + public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException { + objectStore.dropResourcePlan(name); + } } diff --git metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index f388066a92..6f25cf2cad 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.InvalidInputException; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; @@ -53,6 +54,7 @@ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; @@ -946,4 +948,32 @@ public void dropConstraint(String dbName, String tableName, public String getMetastoreDbUuid() throws MetaException { throw new MetaException("Get metastore uuid is not implemented"); } + + @Override + public void createResourcePlan(WMResourcePlan resourcePlan) throws MetaException { + } + + @Override + public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException { + return null; + } + + @Override + public List getAllResourcePlans() throws MetaException { + return null; + } + + @Override + public void alterResourcePlan(String name, WMResourcePlan resourcePlan) + throws NoSuchObjectException, InvalidOperationException, MetaException { + } + + @Override + public void validateResourcePlan(String name) + throws NoSuchObjectException, InvalidObjectException, MetaException { + } + + @Override + public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException { + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 335ea635d2..3a0fc8aac7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -88,6 +88,7 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import 
org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; @@ -161,6 +162,7 @@ import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc; import org.apache.hadoop.hive.ql.plan.AlterIndexDesc; +import org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; @@ -170,6 +172,7 @@ import org.apache.hadoop.hive.ql.plan.ColStatistics; import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; +import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc; import org.apache.hadoop.hive.ql.plan.CreateViewDesc; @@ -179,6 +182,7 @@ import org.apache.hadoop.hive.ql.plan.DescTableDesc; import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; import org.apache.hadoop.hive.ql.plan.DropIndexDesc; +import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.FileMergeDesc; import org.apache.hadoop.hive.ql.plan.GrantDesc; @@ -212,6 +216,7 @@ import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; +import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc; @@ -607,6 +612,22 @@ public int execute(DriverContext driverContext) { if (killQueryDesc != null) { return killQuery(db, killQueryDesc); } + + if (work.getCreateResourcePlanDesc() != null) { + return createResourcePlan(db, work.getCreateResourcePlanDesc()); + } + + if (work.getShowResourcePlanDesc() != null) { + return showResourcePlans(db, work.getShowResourcePlanDesc()); + } + + if (work.getAlterResourcePlanDesc() != null) { + return alterResourcePlan(db, work.getAlterResourcePlanDesc()); + } + + if (work.getDropResourcePlanDesc() != null) { + return dropResourcePlan(db, work.getDropResourcePlanDesc()); + } } catch (Throwable e) { failed(e); return 1; @@ -615,6 +636,69 @@ public int execute(DriverContext driverContext) { return 0; } + private int createResourcePlan(Hive db, CreateResourcePlanDesc createResourcePlanDesc) + throws HiveException { + WMResourcePlan resourcePlan = new WMResourcePlan(); + resourcePlan.setName(createResourcePlanDesc.getName()); + if (createResourcePlanDesc.getQueryParallelism() != null) { + resourcePlan.setQueryParallelism(createResourcePlanDesc.getQueryParallelism()); + } + db.createResourcePlan(resourcePlan); + return 0; + } + + private int showResourcePlans(Hive db, ShowResourcePlanDesc showResourcePlanDesc) + throws HiveException { + // Note: Enhance showResourcePlan to display all the pools, triggers and mappings. 
+ DataOutputStream out = getOutputStream(showResourcePlanDesc.getResFile()); + try { + List resourcePlans; + String rpName = showResourcePlanDesc.getResourcePlanName(); + if (rpName != null) { + resourcePlans = Collections.singletonList(db.getResourcePlan(rpName)); + } else { + resourcePlans = db.getAllResourcePlans(); + } + formatter.showResourcePlans(out, resourcePlans); + } catch (Exception e) { + throw new HiveException(e); + } finally { + IOUtils.closeStream(out); + } + return 0; + } + + private int alterResourcePlan(Hive db, AlterResourcePlanDesc desc) throws HiveException { + if (desc.shouldValidate()) { + db.validateResourcePlan(desc.getRpName()); + return 0; + } + + WMResourcePlan resourcePlan = new WMResourcePlan(); + + if (desc.getNewName() != null) { + resourcePlan.setName(desc.getNewName()); + } else { + resourcePlan.setName(desc.getRpName()); + } + + if (desc.getQueryParallelism() != null) { + resourcePlan.setQueryParallelism(desc.getQueryParallelism()); + } + + if (desc.getStatus() != null) { + resourcePlan.setStatus(desc.getStatus()); + } + + db.alterResourcePlan(desc.getRpName(), resourcePlan); + return 0; + } + + private int dropResourcePlan(Hive db, DropResourcePlanDesc desc) throws HiveException { + db.dropResourcePlan(desc.getRpName()); + return 0; + } + private int preInsertWork(Hive db, PreInsertTableDesc preInsertTableDesc) throws HiveException { try{ HiveMetaHook hook = preInsertTableDesc.getTable().getStorageHandler().getMetaHook(); diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 793ce9e673..090e11c097 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -122,6 +122,7 @@ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; @@ -4686,4 +4687,54 @@ public void addNotNullConstraint(List notNullConstraintCol throw new HiveException(e); } } + + public void createResourcePlan(WMResourcePlan resourcePlan) throws HiveException { + try { + getMSC().createResourcePlan(resourcePlan); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public WMResourcePlan getResourcePlan(String rpName) throws HiveException { + try { + return getMSC().getResourcePlan(rpName); + } catch (NoSuchObjectException e) { + return null; + } catch (Exception e) { + throw new HiveException(e); + } + } + + public List getAllResourcePlans() throws HiveException { + try { + return getMSC().getAllResourcePlans(); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public void dropResourcePlan(String rpName) throws HiveException { + try { + getMSC().dropResourcePlan(rpName); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public void alterResourcePlan(String rpName, WMResourcePlan resourcePlan) throws HiveException { + try { + getMSC().alterResourcePlan(rpName, resourcePlan); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public void validateResourcePlan(String rpName) throws HiveException { + try { + getMSC().validateResourcePlan(rpName); + } catch (Exception e) { + throw new HiveException(e); + } + } }; diff --git
ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java index bdf1b26015..a27f9349fb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java @@ -28,6 +28,7 @@ import java.util.Map; import java.util.Set; +import org.apache.commons.io.IOUtils; import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,6 +39,7 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -46,6 +48,7 @@ import org.apache.hadoop.hive.ql.metadata.PrimaryKeyInfo; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.metadata.UniqueConstraint; +import org.codehaus.jackson.JsonGenerator; import org.codehaus.jackson.map.ObjectMapper; /** @@ -414,4 +417,31 @@ public void showDatabaseDescription(DataOutputStream out, String database, Strin } asJson(out, builder.build()); } + + @Override + public void showResourcePlans(DataOutputStream out, List resourcePlans) + throws HiveException { + JsonGenerator generator = null; + try { + generator = new ObjectMapper().getJsonFactory().createJsonGenerator(out); + generator.writeStartArray(); + for (WMResourcePlan plan : resourcePlans) { + generator.writeStartObject(); + generator.writeStringField("name", plan.getName()); + generator.writeStringField("status", plan.getStatus()); + if (plan.isSetQueryParallelism()) { + generator.writeNumberField("queryParallelism", plan.getQueryParallelism()); + } + generator.writeEndObject(); + } + generator.writeEndArray(); + generator.close(); + } catch (IOException e) { + throw new HiveException(e); + } finally { + if (generator != null) { + IOUtils.closeQuietly(generator); + } + } + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java index ac5b306b93..405acddbec 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -119,5 +120,8 @@ public void showDatabases(DataOutputStream out, List databases) public void showDatabaseDescription (DataOutputStream out, String database, String comment, String location, String ownerName, String ownerType, Map params) throws HiveException; + + public void showResourcePlans(DataOutputStream out, List resourcePlans) + throws HiveException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java 
index 765aa65a47..99d86a52e5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo; import org.apache.hadoop.hive.ql.metadata.Hive; @@ -543,4 +544,25 @@ public void showDatabaseDescription(DataOutputStream outStream, String database, throw new HiveException(e); } } + + public void showResourcePlans(DataOutputStream out, List resourcePlans) + throws HiveException { + try { + for (WMResourcePlan plan : resourcePlans) { + out.write(plan.getName().getBytes("UTF-8")); + out.write(separator); + out.write(plan.getStatus().getBytes("UTF-8")); + out.write(separator); + if (plan.isSetQueryParallelism()) { + out.writeBytes(Integer.toString(plan.getQueryParallelism())); + } else { + out.writeBytes("null"); + } + out.write(terminator); + } + } catch (IOException e) { + throw new HiveException(e); + } + } + } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 973051893a..2d59d60827 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -86,6 +86,7 @@ import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc; import org.apache.hadoop.hive.ql.plan.AlterIndexDesc; import org.apache.hadoop.hive.ql.plan.AlterIndexDesc.AlterIndexTypes; +import org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; @@ -96,12 +97,14 @@ import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; +import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc; import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; import org.apache.hadoop.hive.ql.plan.DescTableDesc; import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; import org.apache.hadoop.hive.ql.plan.DropIndexDesc; +import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; @@ -130,6 +133,7 @@ import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; +import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc; import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc; @@ -543,6 +547,19 @@ public void analyzeInternal(ASTNode input) throws SemanticException { case HiveParser.TOK_CACHE_METADATA: analyzeCacheMetadata(ast); break; + case HiveParser.TOK_CREATERESOURCEPLAN: + analyzeCreateResourcePlan(ast); +
break; + case HiveParser.TOK_SHOWRESOURCEPLAN: + ctx.setResFile(ctx.getLocalTmpPath()); + analyzeShowResourcePlan(ast); + break; + case HiveParser.TOK_ALTER_RP: + analyzeAlterResourcePlan(ast); + break; + case HiveParser.TOK_DROP_RP: + analyzeDropResourcePlan(ast); + break; default: throw new SemanticException("Unsupported command: " + ast); } @@ -822,6 +839,91 @@ private int isPartitionValueContinuous(List partitionKeys, return counter; } + private void analyzeCreateResourcePlan(ASTNode ast) throws SemanticException { + if (ast.getChildCount() == 0) { + throw new SemanticException("Expected name in CREATE RESOURCE PLAN statement"); + } + String resourcePlanName = unescapeIdentifier(ast.getChild(0).getText()); + Integer queryParallelism = null; + if (ast.getChildCount() > 1) { + queryParallelism = Integer.parseInt(ast.getChild(1).getText()); + } + if (ast.getChildCount() > 2) { + throw new SemanticException("Invalid token in CREATE RESOURCE PLAN statement"); + } + CreateResourcePlanDesc desc = new CreateResourcePlanDesc(resourcePlanName, queryParallelism); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf)); + } + + private void analyzeShowResourcePlan(ASTNode ast) throws SemanticException { + String rpName = null; + if (ast.getChildCount() > 0) { + rpName = unescapeIdentifier(ast.getChild(0).getText()); + } + if (ast.getChildCount() > 1) { + throw new SemanticException("Invalid syntax for SHOW RESOURCE PLAN statement"); + } + ShowResourcePlanDesc showResourcePlanDesc = new ShowResourcePlanDesc(rpName, ctx.getResFile()); + rootTasks.add(TaskFactory.get( + new DDLWork(getInputs(), getOutputs(), showResourcePlanDesc), conf)); + setFetchTask(createFetchTask(showResourcePlanDesc.getSchema())); + } + + private void analyzeAlterResourcePlan(ASTNode ast) throws SemanticException { + if (ast.getChildCount() == 0) { + throw new SemanticException("Expected name in ALTER RESOURCE PLAN statement"); + } + String rpName = unescapeIdentifier(ast.getChild(0).getText()); + if (ast.getChildCount() < 2) { + throw new SemanticException("Invalid syntax for ALTER RESOURCE PLAN statement"); + } + AlterResourcePlanDesc desc; + switch (ast.getChild(1).getType()) { + case HiveParser.TOK_VALIDATE: + desc = AlterResourcePlanDesc.createValidatePlan(rpName); + break; + case HiveParser.TOK_ACTIVATE: + desc = AlterResourcePlanDesc.createChangeStatus(rpName, "ACTIVE"); + break; + case HiveParser.TOK_ENABLE: + desc = AlterResourcePlanDesc.createChangeStatus(rpName, "ENABLED"); + break; + case HiveParser.TOK_DISABLE: + desc = AlterResourcePlanDesc.createChangeStatus(rpName, "DISABLED"); + break; + case HiveParser.TOK_QUERY_PARALLELISM: + if (ast.getChildCount() != 3) { + throw new SemanticException( + "Expected number for query parallelism in alter resource plan statement"); + } + int queryParallelism = Integer.parseInt(ast.getChild(2).getText()); + desc = AlterResourcePlanDesc.createChangeParallelism(rpName, queryParallelism); + break; + case HiveParser.TOK_RENAME: + if (ast.getChildCount() != 3) { + throw new SemanticException( + "Expected new name for rename in alter resource plan statement"); + } + String name = ast.getChild(2).getText(); + desc = AlterResourcePlanDesc.createRenamePlan(rpName, name); + break; + default: + throw new SemanticException("Unexpected token in alter resource plan statement"); + } + rootTasks.add(TaskFactory.get( + new DDLWork(getInputs(), getOutputs(), desc), conf)); + } + + private void analyzeDropResourcePlan(ASTNode ast) throws SemanticException { + if
(ast.getChildCount() == 0) { + throw new SemanticException("Expected name in DROP RESOURCE PLAN statement"); + } + String rpName = unescapeIdentifier(ast.getChild(0).getText()); + DropResourcePlanDesc desc = new DropResourcePlanDesc(rpName); + rootTasks.add(TaskFactory.get( + new DDLWork(getInputs(), getOutputs(), desc), conf)); + } + private void analyzeCreateDatabase(ASTNode ast) throws SemanticException { String dbName = unescapeIdentifier(ast.getChild(0).getText()); boolean ifNotExists = false; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g index d0ce4ae1c3..9147b4aad9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g @@ -352,6 +352,11 @@ KW_OPERATOR: 'OPERATOR'; KW_EXPRESSION: 'EXPRESSION'; KW_DETAIL: 'DETAIL'; KW_WAIT: 'WAIT'; +KW_RESOURCE: 'RESOURCE'; +KW_PLAN: 'PLAN'; +KW_QUERY_PARALLELISM: 'QUERY_PARALLELISM'; +KW_PLANS: 'PLANS'; +KW_ACTIVATE: 'ACTIVATE'; // Operators // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work. diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index c2d89042ca..abe3f2b4ae 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -405,6 +405,14 @@ TOK_EXPRESSION; TOK_DETAIL; TOK_BLOCKING; TOK_KILL_QUERY; +TOK_CREATERESOURCEPLAN; +TOK_SHOWRESOURCEPLAN; +TOK_ALTER_RP; +TOK_DROP_RP; +TOK_VALIDATE; +TOK_ACTIVATE; +TOK_QUERY_PARALLELISM; +TOK_RENAME; } @@ -577,6 +585,11 @@ import org.apache.hadoop.hive.conf.HiveConf; xlateMap.put("KW_WAIT", "WAIT"); xlateMap.put("KW_KILL", "KILL"); xlateMap.put("KW_QUERY", "QUERY"); + xlateMap.put("KW_RESOURCE", "RESOURCE"); + xlateMap.put("KW_PLAN", "PLAN"); + xlateMap.put("KW_QUERY_PARALLELISM", "QUERY_PARALLELISM"); + xlateMap.put("KW_PLANS", "PLANS"); + xlateMap.put("KW_ACTIVATE", "ACTIVATE"); // Operators xlateMap.put("DOT", "."); @@ -915,6 +928,9 @@ ddlStatement | showCurrentRole | abortTransactionStatement | killQueryStatement + | createResourcePlanStatement + | alterResourcePlanStatement + | dropResourcePlanStatement ; ifExists @@ -968,6 +984,37 @@ orReplace -> ^(TOK_ORREPLACE) ; +createResourcePlanStatement +@init { pushMsg("create resource plan statement", state); } +@after { popMsg(state); } + : KW_CREATE KW_RESOURCE KW_PLAN + name=identifier + (KW_WITH KW_QUERY_PARALLELISM parallelism=Number)? + -> ^(TOK_CREATERESOURCEPLAN $name $parallelism?)
+ ; + +alterResourcePlanStatement +@init { pushMsg("alter resource plan statement", state); } +@after { popMsg(state); } + : KW_ALTER KW_RESOURCE KW_PLAN name=identifier ( + (KW_VALIDATE -> ^(TOK_ALTER_RP $name TOK_VALIDATE)) + | (KW_ACTIVATE -> ^(TOK_ALTER_RP $name TOK_ACTIVATE)) + | (KW_ENABLE -> ^(TOK_ALTER_RP $name TOK_ENABLE)) + | (KW_DISABLE -> ^(TOK_ALTER_RP $name TOK_DISABLE)) + | (KW_SET KW_QUERY_PARALLELISM EQUAL parallelism=Number + -> ^(TOK_ALTER_RP $name TOK_QUERY_PARALLELISM $parallelism)) + | (KW_RENAME KW_TO newName=identifier + -> ^(TOK_ALTER_RP $name TOK_RENAME $newName)) + ) + ; + +dropResourcePlanStatement +@init { pushMsg("drop resource plan statement", state); } +@after { popMsg(state); } + : KW_DROP KW_RESOURCE KW_PLAN name=identifier + -> ^(TOK_DROP_RP $name) + ; + createDatabaseStatement @init { pushMsg("create database statement", state); } @after { popMsg(state); } @@ -1595,6 +1642,11 @@ showStatement | KW_SHOW KW_COMPACTIONS -> ^(TOK_SHOW_COMPACTIONS) | KW_SHOW KW_TRANSACTIONS -> ^(TOK_SHOW_TRANSACTIONS) | KW_SHOW KW_CONF StringLiteral -> ^(TOK_SHOWCONF StringLiteral) + | KW_SHOW KW_RESOURCE + ( + (KW_PLAN rp_name=identifier -> ^(TOK_SHOWRESOURCEPLAN $rp_name)) + | (KW_PLANS -> ^(TOK_SHOWRESOURCEPLAN)) + ) ; lockStatement diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java index f481308da5..b0afb6af60 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java @@ -134,6 +134,10 @@ commandType.put(HiveParser.TOK_REPL_LOAD, HiveOperation.REPLLOAD); commandType.put(HiveParser.TOK_REPL_STATUS, HiveOperation.REPLSTATUS); commandType.put(HiveParser.TOK_KILL_QUERY, HiveOperation.KILL_QUERY); + commandType.put(HiveParser.TOK_CREATERESOURCEPLAN, HiveOperation.CREATE_RESOURCEPLAN); + commandType.put(HiveParser.TOK_SHOWRESOURCEPLAN, HiveOperation.SHOW_RESOURCEPLAN); + commandType.put(HiveParser.TOK_ALTER_RP, HiveOperation.ALTER_RESOURCEPLAN); + commandType.put(HiveParser.TOK_DROP_RP, HiveOperation.DROP_RESOURCEPLAN); } static { @@ -309,6 +313,10 @@ private static BaseSemanticAnalyzer getInternal(QueryState queryState, ASTNode t case HiveParser.TOK_SHOW_SET_ROLE: case HiveParser.TOK_CACHE_METADATA: case HiveParser.TOK_KILL_QUERY: + case HiveParser.TOK_CREATERESOURCEPLAN: + case HiveParser.TOK_SHOWRESOURCEPLAN: + case HiveParser.TOK_ALTER_RP: + case HiveParser.TOK_DROP_RP: return new DDLSemanticAnalyzer(queryState); case HiveParser.TOK_CREATEFUNCTION: diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java new file mode 100644 index 0000000000..360272da37 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java @@ -0,0 +1,89 @@ +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +@Explain(displayName = "Alter Resource plans", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class AlterResourcePlanDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = -3514685833183437279L; + + private String rpName; + private String newName; + private Integer queryParallelism; + private String status; + private boolean validate; + + public AlterResourcePlanDesc() {} + + private AlterResourcePlanDesc(String rpName, String newName, Integer queryParallelism, + String status, boolean validate) { + this.rpName =
rpName; + this.newName = newName; + this.queryParallelism = queryParallelism; + this.status = status; + this.validate = validate; + } + + public static AlterResourcePlanDesc createChangeParallelism(String rpName, + int queryParallelism) { + return new AlterResourcePlanDesc(rpName, null, queryParallelism, null, false); + } + + public static AlterResourcePlanDesc createChangeStatus(String rpName, String status) { + return new AlterResourcePlanDesc(rpName, null, null, status, false); + } + + public static AlterResourcePlanDesc createValidatePlan(String rpName) { + return new AlterResourcePlanDesc(rpName, null, null, null, true); + } + + public static AlterResourcePlanDesc createRenamePlan(String rpName, String newName) { + return new AlterResourcePlanDesc(rpName, newName, null, null, false); + } + + @Explain(displayName="resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getRpName() { + return rpName; + } + + public void setRpName(String rpName) { + this.rpName = rpName; + } + + @Explain(displayName="newResourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getNewName() { + return newName; + } + + public void setNewName(String newName) { + this.newName = newName; + } + + @Explain(displayName="queryParallelism", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public Integer getQueryParallelism() { + return queryParallelism; + } + + public void setQueryParallelism(Integer queryParallelism) { + this.queryParallelism = queryParallelism; + } + + @Explain(displayName="status", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getStatus() { + return status; + } + + public void setStatus(String status) { + this.status = status; + } + + @Explain(displayName="shouldValidate", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public boolean shouldValidate() { + return validate; + } + + public void setValidate(boolean validate) { + this.validate = validate; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateResourcePlanDesc.java new file mode 100644 index 0000000000..348e3150be --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateResourcePlanDesc.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +@Explain(displayName = "Create ResourcePlan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class CreateResourcePlanDesc extends DDLDesc implements Serializable { + + private static final long serialVersionUID = -3649343104271794404L; + + private String planName; + private Integer queryParallelism; + + // For serialization only. + public CreateResourcePlanDesc() { + } + + public CreateResourcePlanDesc(String planName, Integer queryParallelism) { + this.planName = planName; + this.queryParallelism = queryParallelism; + } + + @Explain(displayName="name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getName() { + return planName; + } + + @Explain(displayName="queryParallelism") + public Integer getQueryParallelism() { + return queryParallelism; + } +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index 0b7c559648..cfd2115424 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -86,6 +86,11 @@ private ShowConfDesc showConfDesc; + private CreateResourcePlanDesc createResourcePlanDesc; + private ShowResourcePlanDesc showResourcePlanDesc; + private DropResourcePlanDesc dropResourcePlanDesc; + private AlterResourcePlanDesc alterResourcePlanDesc; + boolean needLock = false; /** @@ -547,6 +552,30 @@ public DDLWork(HashSet inputs, HashSet outputs, this.killQueryDesc = killQueryDesc; } + public DDLWork(HashSet inputs, HashSet outputs, + CreateResourcePlanDesc createResourcePlanDesc) { + this(inputs, outputs); + this.createResourcePlanDesc = createResourcePlanDesc; + } + + public DDLWork(HashSet inputs, HashSet outputs, + ShowResourcePlanDesc showResourcePlanDesc) { + this(inputs, outputs); + this.showResourcePlanDesc = showResourcePlanDesc; + } + + public DDLWork(HashSet inputs, HashSet outputs, + DropResourcePlanDesc dropResourcePlanDesc) { + this(inputs, outputs); + this.setDropResourcePlanDesc(dropResourcePlanDesc); + } + + public DDLWork(HashSet inputs, HashSet outputs, + AlterResourcePlanDesc alterResourcePlanDesc) { + this(inputs, outputs); + this.setAlterResourcePlanDesc(alterResourcePlanDesc); + } + /** * @return Create Database descriptor */ @@ -1235,4 +1264,38 @@ public PreInsertTableDesc getPreInsertTableDesc() { public void setPreInsertTableDesc(PreInsertTableDesc preInsertTableDesc) { this.preInsertTableDesc = preInsertTableDesc; } + + @Explain(displayName = "Create resource plan") + public CreateResourcePlanDesc getCreateResourcePlanDesc() { + return createResourcePlanDesc; + } + + public void setCreateResourcePlanDesc(CreateResourcePlanDesc createResourcePlanDesc) { + this.createResourcePlanDesc = createResourcePlanDesc; + } + + @Explain(displayName = "Show resource plan") + public ShowResourcePlanDesc getShowResourcePlanDesc() { + return showResourcePlanDesc; + } + + public void setShowResourcePlanDesc(ShowResourcePlanDesc showResourcePlanDesc) { + this.showResourcePlanDesc = showResourcePlanDesc; + } + + public DropResourcePlanDesc getDropResourcePlanDesc() { + return dropResourcePlanDesc; + } + + public void setDropResourcePlanDesc(DropResourcePlanDesc dropResourcePlanDesc) { + this.dropResourcePlanDesc = dropResourcePlanDesc; + } + + public AlterResourcePlanDesc getAlterResourcePlanDesc() { + return 
alterResourcePlanDesc; + } + + public void setAlterResourcePlanDesc(AlterResourcePlanDesc alterResourcePlanDesc) { + this.alterResourcePlanDesc = alterResourcePlanDesc; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/DropResourcePlanDesc.java new file mode 100644 index 0000000000..7fafdea424 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DropResourcePlanDesc.java @@ -0,0 +1,28 @@ +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +@Explain(displayName = "Drop Resource plans", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class DropResourcePlanDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1258596919510047766L; + + private String rpName; + + public DropResourcePlanDesc() {} + + public DropResourcePlanDesc(String rpName) { + this.setRpName(rpName); + } + + @Explain(displayName="resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getRpName() { + return rpName; + } + + public void setRpName(String rpName) { + this.rpName = rpName; + } + +} \ No newline at end of file diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java index 0f69de27dc..4730dc1f9b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java @@ -139,7 +139,11 @@ ROLLBACK("ROLLBACK", null, null, true, true), SET_AUTOCOMMIT("SET AUTOCOMMIT", null, null, true, false), ABORT_TRANSACTIONS("ABORT TRANSACTIONS", null, null, false, false), - KILL_QUERY("KILL QUERY", null, null); + KILL_QUERY("KILL QUERY", null, null), + CREATE_RESOURCEPLAN("CREATE_RESOURCEPLAN", null, null, false, false), + SHOW_RESOURCEPLAN("SHOW_RESOURCEPLAN", null, null, false, false), + ALTER_RESOURCEPLAN("ALTER_RESOURCEPLAN", null, null, false, false), + DROP_RESOURCEPLAN("DROP_RESOURCEPLAN", null, null, false, false); private String operationName; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowResourcePlanDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowResourcePlanDesc.java new file mode 100644 index 0000000000..0b4cfb5adc --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowResourcePlanDesc.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +@Explain(displayName = "Show Resource plans", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ShowResourcePlanDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 6076076933035978545L; + + private static final String table = "show_resourceplan"; + private static final String schema = "rp_name,status,query_parallelism#string,string,int"; + + String resFile; + String resourcePlanName; + + // For serialization only. + public ShowResourcePlanDesc() {} + + public ShowResourcePlanDesc(String rpName, Path resFile) { + this.resourcePlanName = rpName; + this.resFile = resFile.toString(); + } + + @Explain(displayName = "result file", explainLevels = { Level.EXTENDED }) + public String getResFile() { + return resFile; + } + + public void setResFile(String resFile) { + this.resFile = resFile; + } + + @Explain(displayName="resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) + public String getResourcePlanName() { + return resourcePlanName; + } + + public String getTable() { + return table; + } + + public String getSchema() { + return schema; + } +} diff --git ql/src/test/queries/clientpositive/resourceplan.q ql/src/test/queries/clientpositive/resourceplan.q new file mode 100644 index 0000000000..ea6734c05b --- /dev/null +++ ql/src/test/queries/clientpositive/resourceplan.q @@ -0,0 +1,105 @@ +-- Continue on errors, we do check some error conditions below. +-- set hive.cli.errors.ignore=true; + +-- Prevent NPE in calcite. +set hive.cbo.enable=false; + +-- Force DN to create db_privs tables. +show grant user hive_test_user; + +-- Initialize the hive schema. +source ../../metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql; + +-- +-- Actual tests. +-- + +-- Empty resource plans. +SHOW RESOURCE PLANS; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- Create and show plan_1. +CREATE RESOURCE PLAN plan_1; +SHOW RESOURCE PLANS; +SHOW RESOURCE PLAN plan_1; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- Create and show plan_2. +CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM 10; +SHOW RESOURCE PLANS; +SHOW RESOURCE PLAN plan_2; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- +-- Rename resource plans. +-- + +-- Fail, duplicate name. +-- ALTER RESOURCE PLAN plan_1 RENAME TO plan_2; +-- SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- Success. +ALTER RESOURCE PLAN plan_1 RENAME TO plan_3; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- Change query parallelism, success. +ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 20; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- +-- Activate, enable, disable. +-- + +-- DISABLED -> ACTIVE fail. +ALTER RESOURCE PLAN plan_3 ACTIVATE; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- DISABLED -> DISABLED success. +ALTER RESOURCE PLAN plan_3 DISABLE; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- DISABLED -> ENABLED success. +ALTER RESOURCE PLAN plan_3 ENABLE; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- ENABLED -> ACTIVE success. +ALTER RESOURCE PLAN plan_3 ACTIVATE; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- ACTIVE -> ACTIVE success. +ALTER RESOURCE PLAN plan_3 ACTIVATE; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- ACTIVE -> ENABLED fail. +ALTER RESOURCE PLAN plan_3 ENABLE; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- ACTIVE -> DISABLED fail.
+ALTER RESOURCE PLAN plan_3 DISABLE; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- DISABLED -> ENABLED success. +ALTER RESOURCE PLAN plan_2 ENABLE; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- plan_2: ENABLED -> ACTIVE and plan_3: ACTIVE -> ENABLED, success. +ALTER RESOURCE PLAN plan_2 ACTIVATE; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- ENABLED -> ENABLED success. +ALTER RESOURCE PLAN plan_3 ENABLE; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- ENABLED -> DISABLED success. +ALTER RESOURCE PLAN plan_3 DISABLE; +SELECT * FROM SYS.WM_RESOURCEPLANS; + +-- +-- Drop resource plan. +-- + +-- Success. +DROP RESOURCE PLAN plan_3; + +-- Fail. +DROP RESOURCE PLAN plan_2; diff --git ql/src/test/results/clientpositive/llap/resourceplan.q.out ql/src/test/results/clientpositive/llap/resourceplan.q.out new file mode 100644 index 0000000000..2493f0b987 --- /dev/null +++ ql/src/test/results/clientpositive/llap/resourceplan.q.out @@ -0,0 +1,2973 @@ +PREHOOK: query: show grant user hive_test_user +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user +POSTHOOK: type: SHOW_GRANT +default alltypesorc hive_test_user USER DELETE true -1 hive_test_user +default alltypesorc hive_test_user USER INSERT true -1 hive_test_user +default alltypesorc hive_test_user USER SELECT true -1 hive_test_user +default alltypesorc hive_test_user USER UPDATE true -1 hive_test_user +default cbo_t1 hive_test_user USER DELETE true -1 hive_test_user +default cbo_t1 hive_test_user USER INSERT true -1 hive_test_user +default cbo_t1 hive_test_user USER SELECT true -1 hive_test_user +default cbo_t1 hive_test_user USER UPDATE true -1 hive_test_user +default cbo_t2 hive_test_user USER DELETE true -1 hive_test_user +default cbo_t2 hive_test_user USER INSERT true -1 hive_test_user +default cbo_t2 hive_test_user USER SELECT true -1 hive_test_user +default cbo_t2 hive_test_user USER UPDATE true -1 hive_test_user +default cbo_t3 hive_test_user USER DELETE true -1 hive_test_user +default cbo_t3 hive_test_user USER INSERT true -1 hive_test_user +default cbo_t3 hive_test_user USER SELECT true -1 hive_test_user +default cbo_t3 hive_test_user USER UPDATE true -1 hive_test_user +default lineitem hive_test_user USER DELETE true -1 hive_test_user +default lineitem hive_test_user USER INSERT true -1 hive_test_user +default lineitem hive_test_user USER SELECT true -1 hive_test_user +default lineitem hive_test_user USER UPDATE true -1 hive_test_user +default part hive_test_user USER DELETE true -1 hive_test_user +default part hive_test_user USER INSERT true -1 hive_test_user +default part hive_test_user USER SELECT true -1 hive_test_user +default part hive_test_user USER UPDATE true -1 hive_test_user +default src hive_test_user USER DELETE true -1 hive_test_user +default src hive_test_user USER INSERT true -1 hive_test_user +default src hive_test_user USER SELECT true -1 hive_test_user +default src hive_test_user USER UPDATE true -1 hive_test_user +default src1 hive_test_user USER DELETE true -1 hive_test_user +default src1 hive_test_user USER INSERT true -1 hive_test_user +default src1 hive_test_user USER SELECT true -1 hive_test_user +default src1 hive_test_user USER UPDATE true -1 hive_test_user +default src_cbo hive_test_user USER DELETE true -1 hive_test_user +default src_cbo hive_test_user USER INSERT true -1 hive_test_user +default src_cbo hive_test_user USER SELECT true -1 hive_test_user +default src_cbo hive_test_user USER UPDATE true -1 hive_test_user +default src_json hive_test_user USER DELETE true -1 hive_test_user +default src_json 
hive_test_user USER INSERT true -1 hive_test_user +default src_json hive_test_user USER SELECT true -1 hive_test_user +default src_json hive_test_user USER UPDATE true -1 hive_test_user +default src_sequencefile hive_test_user USER DELETE true -1 hive_test_user +default src_sequencefile hive_test_user USER INSERT true -1 hive_test_user +default src_sequencefile hive_test_user USER SELECT true -1 hive_test_user +default src_sequencefile hive_test_user USER UPDATE true -1 hive_test_user +default src_thrift hive_test_user USER DELETE true -1 hive_test_user +default src_thrift hive_test_user USER INSERT true -1 hive_test_user +default src_thrift hive_test_user USER SELECT true -1 hive_test_user +default src_thrift hive_test_user USER UPDATE true -1 hive_test_user +default srcbucket hive_test_user USER DELETE true -1 hive_test_user +default srcbucket hive_test_user USER INSERT true -1 hive_test_user +default srcbucket hive_test_user USER SELECT true -1 hive_test_user +default srcbucket hive_test_user USER UPDATE true -1 hive_test_user +default srcbucket2 hive_test_user USER DELETE true -1 hive_test_user +default srcbucket2 hive_test_user USER INSERT true -1 hive_test_user +default srcbucket2 hive_test_user USER SELECT true -1 hive_test_user +default srcbucket2 hive_test_user USER UPDATE true -1 hive_test_user +default srcpart hive_test_user USER DELETE true -1 hive_test_user +default srcpart hive_test_user USER INSERT true -1 hive_test_user +default srcpart hive_test_user USER SELECT true -1 hive_test_user +default srcpart hive_test_user USER UPDATE true -1 hive_test_user +PREHOOK: query: DROP DATABASE IF EXISTS SYS +PREHOOK: type: DROPDATABASE +POSTHOOK: query: DROP DATABASE IF EXISTS SYS +POSTHOOK: type: DROPDATABASE +PREHOOK: query: CREATE DATABASE SYS +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:SYS +POSTHOOK: query: CREATE DATABASE SYS +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:SYS +PREHOOK: query: USE SYS +PREHOOK: type: SWITCHDATABASE +PREHOOK: Input: database:sys +POSTHOOK: query: USE SYS +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Input: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` ( + `SD_ID` bigint, + `BUCKET_COL_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_BUCKETING_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"BUCKET_COL_NAME\", + \"INTEGER_IDX\" +FROM + \"BUCKETING_COLS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@BUCKETING_COLS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` ( + `SD_ID` bigint, + `BUCKET_COL_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_BUCKETING_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"BUCKET_COL_NAME\", + \"INTEGER_IDX\" +FROM + \"BUCKETING_COLS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@BUCKETING_COLS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `CDS` ( + `CD_ID` bigint, + CONSTRAINT `SYS_PK_CDS` PRIMARY KEY (`CD_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CD_ID\" +FROM + \"CDS\"" +) +PREHOOK: type: CREATETABLE 
+PREHOOK: Output: SYS@CDS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `CDS` ( + `CD_ID` bigint, + CONSTRAINT `SYS_PK_CDS` PRIMARY KEY (`CD_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CD_ID\" +FROM + \"CDS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@CDS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `COLUMNS_V2` ( + `CD_ID` bigint, + `COMMENT` string, + `COLUMN_NAME` string, + `TYPE_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_COLUMN_V2` PRIMARY KEY (`CD_ID`,`COLUMN_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CD_ID\", + \"COMMENT\", + \"COLUMN_NAME\", + \"TYPE_NAME\", + \"INTEGER_IDX\" +FROM + \"COLUMNS_V2\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@COLUMNS_V2 +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `COLUMNS_V2` ( + `CD_ID` bigint, + `COMMENT` string, + `COLUMN_NAME` string, + `TYPE_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_COLUMN_V2` PRIMARY KEY (`CD_ID`,`COLUMN_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CD_ID\", + \"COMMENT\", + \"COLUMN_NAME\", + \"TYPE_NAME\", + \"INTEGER_IDX\" +FROM + \"COLUMNS_V2\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@COLUMNS_V2 +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` ( + `DB_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_DATABASE_PARAMS` PRIMARY KEY (`DB_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"DATABASE_PARAMS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@DATABASE_PARAMS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` ( + `DB_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_DATABASE_PARAMS` PRIMARY KEY (`DB_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"DATABASE_PARAMS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@DATABASE_PARAMS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `DBS` ( + `DB_ID` bigint, + `DB_LOCATION_URI` string, + `NAME` string, + `OWNER_NAME` string, + `OWNER_TYPE` string, + CONSTRAINT `SYS_PK_DBS` PRIMARY KEY (`DB_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_ID\", + \"DB_LOCATION_URI\", + \"NAME\", + \"OWNER_NAME\", + \"OWNER_TYPE\" +FROM + DBS" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@DBS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `DBS` ( + `DB_ID` bigint, + `DB_LOCATION_URI` string, + `NAME` string, + `OWNER_NAME` string, + `OWNER_TYPE` string, + CONSTRAINT `SYS_PK_DBS` PRIMARY KEY (`DB_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' 
+TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_ID\", + \"DB_LOCATION_URI\", + \"NAME\", + \"OWNER_NAME\", + \"OWNER_TYPE\" +FROM + DBS" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@DBS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `DB_PRIVS` ( + `DB_GRANT_ID` bigint, + `CREATE_TIME` int, + `DB_ID` bigint, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `DB_PRIV` string, + CONSTRAINT `SYS_PK_DB_PRIVS` PRIMARY KEY (`DB_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_GRANT_ID\", + \"CREATE_TIME\", + \"DB_ID\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"DB_PRIV\" +FROM + \"DB_PRIVS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@DB_PRIVS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `DB_PRIVS` ( + `DB_GRANT_ID` bigint, + `CREATE_TIME` int, + `DB_ID` bigint, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `DB_PRIV` string, + CONSTRAINT `SYS_PK_DB_PRIVS` PRIMARY KEY (`DB_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_GRANT_ID\", + \"CREATE_TIME\", + \"DB_ID\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"DB_PRIV\" +FROM + \"DB_PRIVS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@DB_PRIVS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` ( + `USER_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` string, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `USER_PRIV` string, + CONSTRAINT `SYS_PK_GLOBAL_PRIVS` PRIMARY KEY (`USER_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"USER_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"USER_PRIV\" +FROM + \"GLOBAL_PRIVS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@GLOBAL_PRIVS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` ( + `USER_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` string, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `USER_PRIV` string, + CONSTRAINT `SYS_PK_GLOBAL_PRIVS` PRIMARY KEY (`USER_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"USER_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"USER_PRIV\" +FROM + \"GLOBAL_PRIVS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@GLOBAL_PRIVS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `IDXS` ( + `INDEX_ID` bigint, + `CREATE_TIME` int, + `DEFERRED_REBUILD` boolean, + `INDEX_HANDLER_CLASS` string, + `INDEX_NAME` string, + `INDEX_TBL_ID` bigint, + `LAST_ACCESS_TIME` int, + 
`ORIG_TBL_ID` bigint, + `SD_ID` bigint, + CONSTRAINT `SYS_PK_IDXS` PRIMARY KEY (`INDEX_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"INDEX_ID\", + \"CREATE_TIME\", + \"DEFERRED_REBUILD\", + \"INDEX_HANDLER_CLASS\", + \"INDEX_NAME\", + \"INDEX_TBL_ID\", + \"LAST_ACCESS_TIME\", + \"ORIG_TBL_ID\", + \"SD_ID\" +FROM + \"IDXS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@IDXS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `IDXS` ( + `INDEX_ID` bigint, + `CREATE_TIME` int, + `DEFERRED_REBUILD` boolean, + `INDEX_HANDLER_CLASS` string, + `INDEX_NAME` string, + `INDEX_TBL_ID` bigint, + `LAST_ACCESS_TIME` int, + `ORIG_TBL_ID` bigint, + `SD_ID` bigint, + CONSTRAINT `SYS_PK_IDXS` PRIMARY KEY (`INDEX_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"INDEX_ID\", + \"CREATE_TIME\", + \"DEFERRED_REBUILD\", + \"INDEX_HANDLER_CLASS\", + \"INDEX_NAME\", + \"INDEX_TBL_ID\", + \"LAST_ACCESS_TIME\", + \"ORIG_TBL_ID\", + \"SD_ID\" +FROM + \"IDXS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@IDXS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` ( + `INDEX_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_INDEX_PARAMS` PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"INDEX_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"INDEX_PARAMS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@INDEX_PARAMS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` ( + `INDEX_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_INDEX_PARAMS` PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"INDEX_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"INDEX_PARAMS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@INDEX_PARAMS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `PARTITIONS` ( + `PART_ID` bigint, + `CREATE_TIME` int, + `LAST_ACCESS_TIME` int, + `PART_NAME` string, + `SD_ID` bigint, + `TBL_ID` bigint, + CONSTRAINT `SYS_PK_PARTITIONS` PRIMARY KEY (`PART_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"CREATE_TIME\", + \"LAST_ACCESS_TIME\", + \"PART_NAME\", + \"SD_ID\", + \"TBL_ID\" +FROM + \"PARTITIONS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@PARTITIONS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `PARTITIONS` ( + `PART_ID` bigint, + `CREATE_TIME` int, + `LAST_ACCESS_TIME` int, + `PART_NAME` string, + `SD_ID` bigint, + `TBL_ID` bigint, + CONSTRAINT `SYS_PK_PARTITIONS` PRIMARY KEY (`PART_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"CREATE_TIME\", + \"LAST_ACCESS_TIME\", + \"PART_NAME\", + \"SD_ID\", + \"TBL_ID\" +FROM + \"PARTITIONS\"" +) +POSTHOOK: type: 
CREATETABLE +POSTHOOK: Output: SYS@PARTITIONS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` ( + `TBL_ID` bigint, + `PKEY_COMMENT` string, + `PKEY_NAME` string, + `PKEY_TYPE` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_PARTITION_KEYS` PRIMARY KEY (`TBL_ID`,`PKEY_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"PKEY_COMMENT\", + \"PKEY_NAME\", + \"PKEY_TYPE\", + \"INTEGER_IDX\" +FROM + \"PARTITION_KEYS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@PARTITION_KEYS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` ( + `TBL_ID` bigint, + `PKEY_COMMENT` string, + `PKEY_NAME` string, + `PKEY_TYPE` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_PARTITION_KEYS` PRIMARY KEY (`TBL_ID`,`PKEY_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"PKEY_COMMENT\", + \"PKEY_NAME\", + \"PKEY_TYPE\", + \"INTEGER_IDX\" +FROM + \"PARTITION_KEYS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@PARTITION_KEYS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` ( + `PART_ID` bigint, + `PART_KEY_VAL` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_PARTITION_KEY_VALS` PRIMARY KEY (`PART_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"PART_KEY_VAL\", + \"INTEGER_IDX\" +FROM + \"PARTITION_KEY_VALS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@PARTITION_KEY_VALS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` ( + `PART_ID` bigint, + `PART_KEY_VAL` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_PARTITION_KEY_VALS` PRIMARY KEY (`PART_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"PART_KEY_VAL\", + \"INTEGER_IDX\" +FROM + \"PARTITION_KEY_VALS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@PARTITION_KEY_VALS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` ( + `PART_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_PARTITION_PARAMS` PRIMARY KEY (`PART_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"PARTITION_PARAMS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@PARTITION_PARAMS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` ( + `PART_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_PARTITION_PARAMS` PRIMARY KEY (`PART_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"PARTITION_PARAMS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@PARTITION_PARAMS +POSTHOOK: Output: database:sys +PREHOOK: query: 
CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` ( + `PART_COLUMN_GRANT_ID` bigint, + `COLUMN_NAME` string, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PART_ID` bigint, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `PART_COL_PRIV` string, + CONSTRAINT `SYS_PK_PART_COL_PRIVS` PRIMARY KEY (`PART_COLUMN_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_COLUMN_GRANT_ID\", + \"COLUMN_NAME\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PART_ID\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"PART_COL_PRIV\" +FROM + \"PART_COL_PRIVS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@PART_COL_PRIVS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` ( + `PART_COLUMN_GRANT_ID` bigint, + `COLUMN_NAME` string, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PART_ID` bigint, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `PART_COL_PRIV` string, + CONSTRAINT `SYS_PK_PART_COL_PRIVS` PRIMARY KEY (`PART_COLUMN_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_COLUMN_GRANT_ID\", + \"COLUMN_NAME\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PART_ID\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"PART_COL_PRIV\" +FROM + \"PART_COL_PRIVS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@PART_COL_PRIVS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `PART_PRIVS` ( + `PART_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PART_ID` bigint, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `PART_PRIV` string, + CONSTRAINT `SYS_PK_PART_PRIVS` PRIMARY KEY (`PART_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PART_ID\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"PART_PRIV\" +FROM + \"PART_PRIVS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@PART_PRIVS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `PART_PRIVS` ( + `PART_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PART_ID` bigint, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `PART_PRIV` string, + CONSTRAINT `SYS_PK_PART_PRIVS` PRIMARY KEY (`PART_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PART_ID\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"PART_PRIV\" +FROM + \"PART_PRIVS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@PART_PRIVS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `ROLES` ( + `ROLE_ID` bigint, + `CREATE_TIME` int, + `OWNER_NAME` string, + `ROLE_NAME` string, + CONSTRAINT `SYS_PK_ROLES` PRIMARY KEY (`ROLE_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' 
+TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"ROLE_ID\", + \"CREATE_TIME\", + \"OWNER_NAME\", + \"ROLE_NAME\" +FROM + \"ROLES\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@ROLES +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `ROLES` ( + `ROLE_ID` bigint, + `CREATE_TIME` int, + `OWNER_NAME` string, + `ROLE_NAME` string, + CONSTRAINT `SYS_PK_ROLES` PRIMARY KEY (`ROLE_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"ROLE_ID\", + \"CREATE_TIME\", + \"OWNER_NAME\", + \"ROLE_NAME\" +FROM + \"ROLES\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@ROLES +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `ROLE_MAP` ( + `ROLE_GRANT_ID` bigint, + `ADD_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `ROLE_ID` bigint, + CONSTRAINT `SYS_PK_ROLE_MAP` PRIMARY KEY (`ROLE_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"ROLE_GRANT_ID\", + \"ADD_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"ROLE_ID\" +FROM + \"ROLE_MAP\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@ROLE_MAP +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `ROLE_MAP` ( + `ROLE_GRANT_ID` bigint, + `ADD_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `ROLE_ID` bigint, + CONSTRAINT `SYS_PK_ROLE_MAP` PRIMARY KEY (`ROLE_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"ROLE_GRANT_ID\", + \"ADD_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"ROLE_ID\" +FROM + \"ROLE_MAP\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@ROLE_MAP +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `SDS` ( + `SD_ID` bigint, + `CD_ID` bigint, + `INPUT_FORMAT` string, + `IS_COMPRESSED` boolean, + `IS_STOREDASSUBDIRECTORIES` boolean, + `LOCATION` string, + `NUM_BUCKETS` int, + `OUTPUT_FORMAT` string, + `SERDE_ID` bigint, + CONSTRAINT `SYS_PK_SDS` PRIMARY KEY (`SD_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"CD_ID\", + \"INPUT_FORMAT\", + \"IS_COMPRESSED\", + \"IS_STOREDASSUBDIRECTORIES\", + \"LOCATION\", + \"NUM_BUCKETS\", + \"OUTPUT_FORMAT\", + \"SERDE_ID\" +FROM + \"SDS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SDS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `SDS` ( + `SD_ID` bigint, + `CD_ID` bigint, + `INPUT_FORMAT` string, + `IS_COMPRESSED` boolean, + `IS_STOREDASSUBDIRECTORIES` boolean, + `LOCATION` string, + `NUM_BUCKETS` int, + `OUTPUT_FORMAT` string, + `SERDE_ID` bigint, + CONSTRAINT `SYS_PK_SDS` PRIMARY KEY (`SD_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"CD_ID\", + \"INPUT_FORMAT\", + \"IS_COMPRESSED\", + 
\"IS_STOREDASSUBDIRECTORIES\", + \"LOCATION\", + \"NUM_BUCKETS\", + \"OUTPUT_FORMAT\", + \"SERDE_ID\" +FROM + \"SDS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SDS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `SD_PARAMS` ( + `SD_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_SD_PARAMS` PRIMARY KEY (`SD_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"SD_PARAMS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SD_PARAMS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `SD_PARAMS` ( + `SD_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_SD_PARAMS` PRIMARY KEY (`SD_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"SD_PARAMS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SD_PARAMS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` ( + `SEQUENCE_NAME` string, + `NEXT_VAL` bigint, + CONSTRAINT `SYS_PK_SEQUENCE_TABLE` PRIMARY KEY (`SEQUENCE_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SEQUENCE_NAME\", + \"NEXT_VAL\" +FROM + \"SEQUENCE_TABLE\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SEQUENCE_TABLE +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` ( + `SEQUENCE_NAME` string, + `NEXT_VAL` bigint, + CONSTRAINT `SYS_PK_SEQUENCE_TABLE` PRIMARY KEY (`SEQUENCE_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SEQUENCE_NAME\", + \"NEXT_VAL\" +FROM + \"SEQUENCE_TABLE\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SEQUENCE_TABLE +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `SERDES` ( + `SERDE_ID` bigint, + `NAME` string, + `SLIB` string, + CONSTRAINT `SYS_PK_SERDES` PRIMARY KEY (`SERDE_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SERDE_ID\", + \"NAME\", + \"SLIB\" +FROM + \"SERDES\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SERDES +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `SERDES` ( + `SERDE_ID` bigint, + `NAME` string, + `SLIB` string, + CONSTRAINT `SYS_PK_SERDES` PRIMARY KEY (`SERDE_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SERDE_ID\", + \"NAME\", + \"SLIB\" +FROM + \"SERDES\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SERDES +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` ( + `SERDE_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_SERDE_PARAMS` PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + 
\"SERDE_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"SERDE_PARAMS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SERDE_PARAMS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` ( + `SERDE_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_SERDE_PARAMS` PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SERDE_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"SERDE_PARAMS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SERDE_PARAMS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` ( + `SD_ID` bigint, + `SKEWED_COL_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_COL_NAMES` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"SKEWED_COL_NAME\", + \"INTEGER_IDX\" +FROM + \"SKEWED_COL_NAMES\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SKEWED_COL_NAMES +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` ( + `SD_ID` bigint, + `SKEWED_COL_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_COL_NAMES` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"SKEWED_COL_NAME\", + \"INTEGER_IDX\" +FROM + \"SKEWED_COL_NAMES\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SKEWED_COL_NAMES +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` ( + `SD_ID` bigint, + `STRING_LIST_ID_KID` bigint, + `LOCATION` string, + CONSTRAINT `SYS_PK_COL_VALUE_LOC_MAP` PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"STRING_LIST_ID_KID\", + \"LOCATION\" +FROM + \"SKEWED_COL_VALUE_LOC_MAP\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SKEWED_COL_VALUE_LOC_MAP +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` ( + `SD_ID` bigint, + `STRING_LIST_ID_KID` bigint, + `LOCATION` string, + CONSTRAINT `SYS_PK_COL_VALUE_LOC_MAP` PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"STRING_LIST_ID_KID\", + \"LOCATION\" +FROM + \"SKEWED_COL_VALUE_LOC_MAP\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SKEWED_COL_VALUE_LOC_MAP +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` ( + `STRING_LIST_ID` bigint, + CONSTRAINT `SYS_PK_SKEWED_STRING_LIST` PRIMARY KEY (`STRING_LIST_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"STRING_LIST_ID\" +FROM + \"SKEWED_STRING_LIST\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SKEWED_STRING_LIST +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` ( + 
`STRING_LIST_ID` bigint, + CONSTRAINT `SYS_PK_SKEWED_STRING_LIST` PRIMARY KEY (`STRING_LIST_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"STRING_LIST_ID\" +FROM + \"SKEWED_STRING_LIST\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SKEWED_STRING_LIST +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` ( + `STRING_LIST_ID` bigint, + `STRING_LIST_VALUE` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_STRING_LIST_VALUES` PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"STRING_LIST_ID\", + \"STRING_LIST_VALUE\", + \"INTEGER_IDX\" +FROM + \"SKEWED_STRING_LIST_VALUES\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SKEWED_STRING_LIST_VALUES +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` ( + `STRING_LIST_ID` bigint, + `STRING_LIST_VALUE` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_STRING_LIST_VALUES` PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"STRING_LIST_ID\", + \"STRING_LIST_VALUE\", + \"INTEGER_IDX\" +FROM + \"SKEWED_STRING_LIST_VALUES\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SKEWED_STRING_LIST_VALUES +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` ( + `SD_ID_OID` bigint, + `STRING_LIST_ID_EID` bigint, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_VALUES` PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID_OID\", + \"STRING_LIST_ID_EID\", + \"INTEGER_IDX\" +FROM + \"SKEWED_VALUES\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SKEWED_VALUES +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` ( + `SD_ID_OID` bigint, + `STRING_LIST_ID_EID` bigint, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_VALUES` PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID_OID\", + \"STRING_LIST_ID_EID\", + \"INTEGER_IDX\" +FROM + \"SKEWED_VALUES\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SKEWED_VALUES +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `SORT_COLS` ( + `SD_ID` bigint, + `COLUMN_NAME` string, + `ORDER` int, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SORT_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"COLUMN_NAME\", + \"ORDER\", + \"INTEGER_IDX\" +FROM + \"SORT_COLS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@SORT_COLS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `SORT_COLS` ( + `SD_ID` bigint, + `COLUMN_NAME` string, + `ORDER` int, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SORT_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 
'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"COLUMN_NAME\", + \"ORDER\", + \"INTEGER_IDX\" +FROM + \"SORT_COLS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@SORT_COLS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` ( + `TBL_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_TABLE_PARAMS` PRIMARY KEY (`TBL_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"TABLE_PARAMS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@TABLE_PARAMS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` ( + `TBL_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_TABLE_PARAMS` PRIMARY KEY (`TBL_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"TABLE_PARAMS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@TABLE_PARAMS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `TBLS` ( + `TBL_ID` bigint, + `CREATE_TIME` int, + `DB_ID` bigint, + `LAST_ACCESS_TIME` int, + `OWNER` string, + `RETENTION` int, + `SD_ID` bigint, + `TBL_NAME` string, + `TBL_TYPE` string, + `VIEW_EXPANDED_TEXT` string, + `VIEW_ORIGINAL_TEXT` string, + `IS_REWRITE_ENABLED` boolean, + CONSTRAINT `SYS_PK_TBLS` PRIMARY KEY (`TBL_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"CREATE_TIME\", + \"DB_ID\", + \"LAST_ACCESS_TIME\", + \"OWNER\", + \"RETENTION\", + \"SD_ID\", + \"TBL_NAME\", + \"TBL_TYPE\", + \"VIEW_EXPANDED_TEXT\", + \"VIEW_ORIGINAL_TEXT\", + \"IS_REWRITE_ENABLED\" +FROM TBLS" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@TBLS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `TBLS` ( + `TBL_ID` bigint, + `CREATE_TIME` int, + `DB_ID` bigint, + `LAST_ACCESS_TIME` int, + `OWNER` string, + `RETENTION` int, + `SD_ID` bigint, + `TBL_NAME` string, + `TBL_TYPE` string, + `VIEW_EXPANDED_TEXT` string, + `VIEW_ORIGINAL_TEXT` string, + `IS_REWRITE_ENABLED` boolean, + CONSTRAINT `SYS_PK_TBLS` PRIMARY KEY (`TBL_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"CREATE_TIME\", + \"DB_ID\", + \"LAST_ACCESS_TIME\", + \"OWNER\", + \"RETENTION\", + \"SD_ID\", + \"TBL_NAME\", + \"TBL_TYPE\", + \"VIEW_EXPANDED_TEXT\", + \"VIEW_ORIGINAL_TEXT\", + \"IS_REWRITE_ENABLED\" +FROM TBLS" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@TBLS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` ( + `TBL_COLUMN_GRANT_ID` bigint, + `COLUMN_NAME` string, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `TBL_COL_PRIV` string, + `TBL_ID` bigint, + CONSTRAINT `SYS_PK_TBL_COL_PRIVS` PRIMARY KEY (`TBL_COLUMN_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' 
+TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_COLUMN_GRANT_ID\", + \"COLUMN_NAME\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"TBL_COL_PRIV\", + \"TBL_ID\" +FROM + \"TBL_COL_PRIVS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@TBL_COL_PRIVS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` ( + `TBL_COLUMN_GRANT_ID` bigint, + `COLUMN_NAME` string, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `TBL_COL_PRIV` string, + `TBL_ID` bigint, + CONSTRAINT `SYS_PK_TBL_COL_PRIVS` PRIMARY KEY (`TBL_COLUMN_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_COLUMN_GRANT_ID\", + \"COLUMN_NAME\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"TBL_COL_PRIV\", + \"TBL_ID\" +FROM + \"TBL_COL_PRIVS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@TBL_COL_PRIVS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `TBL_PRIVS` ( + `TBL_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `TBL_PRIV` string, + `TBL_ID` bigint, + CONSTRAINT `SYS_PK_TBL_PRIVS` PRIMARY KEY (`TBL_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"TBL_PRIV\", + \"TBL_ID\" +FROM + \"TBL_PRIVS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@TBL_PRIVS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `TBL_PRIVS` ( + `TBL_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `TBL_PRIV` string, + `TBL_ID` bigint, + CONSTRAINT `SYS_PK_TBL_PRIVS` PRIMARY KEY (`TBL_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"TBL_PRIV\", + \"TBL_ID\" +FROM + \"TBL_PRIVS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@TBL_PRIVS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` ( + `CS_ID` bigint, + `DB_NAME` string, + `TABLE_NAME` string, + `COLUMN_NAME` string, + `COLUMN_TYPE` string, + `TBL_ID` bigint, + `LONG_LOW_VALUE` bigint, + `LONG_HIGH_VALUE` bigint, + `DOUBLE_HIGH_VALUE` double, + `DOUBLE_LOW_VALUE` double, + `BIG_DECIMAL_LOW_VALUE` string, + `BIG_DECIMAL_HIGH_VALUE` string, + `NUM_NULLS` bigint, + `NUM_DISTINCTS` bigint, + `AVG_COL_LEN` double, + `MAX_COL_LEN` bigint, + `NUM_TRUES` bigint, + `NUM_FALSES` bigint, + `LAST_ANALYZED` bigint, + CONSTRAINT `SYS_PK_TAB_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", 
+"hive.sql.query" = +"SELECT + \"CS_ID\", + \"DB_NAME\", + \"TABLE_NAME\", + \"COLUMN_NAME\", + \"COLUMN_TYPE\", + \"TBL_ID\", + \"LONG_LOW_VALUE\", + \"LONG_HIGH_VALUE\", + \"DOUBLE_HIGH_VALUE\", + \"DOUBLE_LOW_VALUE\", + \"BIG_DECIMAL_LOW_VALUE\", + \"BIG_DECIMAL_HIGH_VALUE\", + \"NUM_NULLS\", + \"NUM_DISTINCTS\", + \"AVG_COL_LEN\", + \"MAX_COL_LEN\", + \"NUM_TRUES\", + \"NUM_FALSES\", + \"LAST_ANALYZED\" +FROM + \"TAB_COL_STATS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@TAB_COL_STATS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` ( + `CS_ID` bigint, + `DB_NAME` string, + `TABLE_NAME` string, + `COLUMN_NAME` string, + `COLUMN_TYPE` string, + `TBL_ID` bigint, + `LONG_LOW_VALUE` bigint, + `LONG_HIGH_VALUE` bigint, + `DOUBLE_HIGH_VALUE` double, + `DOUBLE_LOW_VALUE` double, + `BIG_DECIMAL_LOW_VALUE` string, + `BIG_DECIMAL_HIGH_VALUE` string, + `NUM_NULLS` bigint, + `NUM_DISTINCTS` bigint, + `AVG_COL_LEN` double, + `MAX_COL_LEN` bigint, + `NUM_TRUES` bigint, + `NUM_FALSES` bigint, + `LAST_ANALYZED` bigint, + CONSTRAINT `SYS_PK_TAB_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CS_ID\", + \"DB_NAME\", + \"TABLE_NAME\", + \"COLUMN_NAME\", + \"COLUMN_TYPE\", + \"TBL_ID\", + \"LONG_LOW_VALUE\", + \"LONG_HIGH_VALUE\", + \"DOUBLE_HIGH_VALUE\", + \"DOUBLE_LOW_VALUE\", + \"BIG_DECIMAL_LOW_VALUE\", + \"BIG_DECIMAL_HIGH_VALUE\", + \"NUM_NULLS\", + \"NUM_DISTINCTS\", + \"AVG_COL_LEN\", + \"MAX_COL_LEN\", + \"NUM_TRUES\", + \"NUM_FALSES\", + \"LAST_ANALYZED\" +FROM + \"TAB_COL_STATS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@TAB_COL_STATS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `PART_COL_STATS` ( + `CS_ID` bigint, + `DB_NAME` string, + `TABLE_NAME` string, + `PARTITION_NAME` string, + `COLUMN_NAME` string, + `COLUMN_TYPE` string, + `PART_ID` bigint, + `LONG_LOW_VALUE` bigint, + `LONG_HIGH_VALUE` bigint, + `DOUBLE_HIGH_VALUE` double, + `DOUBLE_LOW_VALUE` double, + `BIG_DECIMAL_LOW_VALUE` string, + `BIG_DECIMAL_HIGH_VALUE` string, + `NUM_NULLS` bigint, + `NUM_DISTINCTS` bigint, + `AVG_COL_LEN` double, + `MAX_COL_LEN` bigint, + `NUM_TRUES` bigint, + `NUM_FALSES` bigint, + `LAST_ANALYZED` bigint, + CONSTRAINT `SYS_PK_PART_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CS_ID\", + \"DB_NAME\", + \"TABLE_NAME\", + \"PARTITION_NAME\", + \"COLUMN_NAME\", + \"COLUMN_TYPE\", + \"PART_ID\", + \"LONG_LOW_VALUE\", + \"LONG_HIGH_VALUE\", + \"DOUBLE_HIGH_VALUE\", + \"DOUBLE_LOW_VALUE\", + \"BIG_DECIMAL_LOW_VALUE\", + \"BIG_DECIMAL_HIGH_VALUE\", + \"NUM_NULLS\", + \"NUM_DISTINCTS\", + \"AVG_COL_LEN\", + \"MAX_COL_LEN\", + \"NUM_TRUES\", + \"NUM_FALSES\", + \"LAST_ANALYZED\" +FROM + \"PART_COL_STATS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@PART_COL_STATS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `PART_COL_STATS` ( + `CS_ID` bigint, + `DB_NAME` string, + `TABLE_NAME` string, + `PARTITION_NAME` string, + `COLUMN_NAME` string, + `COLUMN_TYPE` string, + `PART_ID` bigint, + `LONG_LOW_VALUE` bigint, + `LONG_HIGH_VALUE` bigint, + `DOUBLE_HIGH_VALUE` double, + `DOUBLE_LOW_VALUE` double, + `BIG_DECIMAL_LOW_VALUE` string, + `BIG_DECIMAL_HIGH_VALUE` string, + `NUM_NULLS` bigint, + 
`NUM_DISTINCTS` bigint, + `AVG_COL_LEN` double, + `MAX_COL_LEN` bigint, + `NUM_TRUES` bigint, + `NUM_FALSES` bigint, + `LAST_ANALYZED` bigint, + CONSTRAINT `SYS_PK_PART_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CS_ID\", + \"DB_NAME\", + \"TABLE_NAME\", + \"PARTITION_NAME\", + \"COLUMN_NAME\", + \"COLUMN_TYPE\", + \"PART_ID\", + \"LONG_LOW_VALUE\", + \"LONG_HIGH_VALUE\", + \"DOUBLE_HIGH_VALUE\", + \"DOUBLE_LOW_VALUE\", + \"BIG_DECIMAL_LOW_VALUE\", + \"BIG_DECIMAL_HIGH_VALUE\", + \"NUM_NULLS\", + \"NUM_DISTINCTS\", + \"AVG_COL_LEN\", + \"MAX_COL_LEN\", + \"NUM_TRUES\", + \"NUM_FALSES\", + \"LAST_ANALYZED\" +FROM + \"PART_COL_STATS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@PART_COL_STATS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `VERSION` ( + `VER_ID` BIGINT, + `SCHEMA_VERSION` string, + `VERSION_COMMENT` string, + CONSTRAINT `SYS_PK_VERSION` PRIMARY KEY (`VER_ID`) DISABLE +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@VERSION +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `VERSION` ( + `VER_ID` BIGINT, + `SCHEMA_VERSION` string, + `VERSION_COMMENT` string, + CONSTRAINT `SYS_PK_VERSION` PRIMARY KEY (`VER_ID`) DISABLE +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@VERSION +POSTHOOK: Output: database:sys +PREHOOK: query: INSERT INTO `VERSION` VALUES (1, '3.0.0', 'Hive release version 3.0.0') +PREHOOK: type: QUERY +PREHOOK: Output: sys@version +POSTHOOK: query: INSERT INTO `VERSION` VALUES (1, '3.0.0', 'Hive release version 3.0.0') +POSTHOOK: type: QUERY +POSTHOOK: Output: sys@version +POSTHOOK: Lineage: version.schema_version SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: version.ver_id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: version.version_comment SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ] +PREHOOK: query: CREATE TABLE IF NOT EXISTS `DB_VERSION` ( + `VER_ID` BIGINT, + `SCHEMA_VERSION` string, + `VERSION_COMMENT` string, + CONSTRAINT `SYS_PK_DB_VERSION` PRIMARY KEY (`VER_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"VER_ID\", + \"SCHEMA_VERSION\", + \"VERSION_COMMENT\" +FROM + \"VERSION\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@DB_VERSION +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `DB_VERSION` ( + `VER_ID` BIGINT, + `SCHEMA_VERSION` string, + `VERSION_COMMENT` string, + CONSTRAINT `SYS_PK_DB_VERSION` PRIMARY KEY (`VER_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"VER_ID\", + \"SCHEMA_VERSION\", + \"VERSION_COMMENT\" +FROM + \"VERSION\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@DB_VERSION +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `FUNCS` ( + `FUNC_ID` bigint, + `CLASS_NAME` string, + `CREATE_TIME` int, + `DB_ID` bigint, + `FUNC_NAME` string, + `FUNC_TYPE` int, + `OWNER_NAME` string, + `OWNER_TYPE` string, + CONSTRAINT `SYS_PK_FUNCS` PRIMARY KEY (`FUNC_ID`) DISABLE +) 
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"FUNC_ID\", + \"CLASS_NAME\", + \"CREATE_TIME\", + \"DB_ID\", + \"FUNC_NAME\", + \"FUNC_TYPE\", + \"OWNER_NAME\", + \"OWNER_TYPE\" +FROM + \"FUNCS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@FUNCS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `FUNCS` ( + `FUNC_ID` bigint, + `CLASS_NAME` string, + `CREATE_TIME` int, + `DB_ID` bigint, + `FUNC_NAME` string, + `FUNC_TYPE` int, + `OWNER_NAME` string, + `OWNER_TYPE` string, + CONSTRAINT `SYS_PK_FUNCS` PRIMARY KEY (`FUNC_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"FUNC_ID\", + \"CLASS_NAME\", + \"CREATE_TIME\", + \"DB_ID\", + \"FUNC_NAME\", + \"FUNC_TYPE\", + \"OWNER_NAME\", + \"OWNER_TYPE\" +FROM + \"FUNCS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@FUNCS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS` +( + `CHILD_CD_ID` bigint, + `CHILD_INTEGER_IDX` int, + `CHILD_TBL_ID` bigint, + `PARENT_CD_ID` bigint, + `PARENT_INTEGER_IDX` int, + `PARENT_TBL_ID` bigint, + `POSITION` bigint, + `CONSTRAINT_NAME` string, + `CONSTRAINT_TYPE` string, + `UPDATE_RULE` string, + `DELETE_RULE` string, + `ENABLE_VALIDATE_RELY` int, + CONSTRAINT `SYS_PK_KEY_CONSTRAINTS` PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CHILD_CD_ID\", + \"CHILD_INTEGER_IDX\", + \"CHILD_TBL_ID\", + \"PARENT_CD_ID\", + \"PARENT_INTEGER_IDX\", + \"PARENT_TBL_ID\", + \"POSITION\", + \"CONSTRAINT_NAME\", + \"CONSTRAINT_TYPE\", + \"UPDATE_RULE\", + \"DELETE_RULE\", + \"ENABLE_VALIDATE_RELY\" +FROM + \"KEY_CONSTRAINTS\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@KEY_CONSTRAINTS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS` +( + `CHILD_CD_ID` bigint, + `CHILD_INTEGER_IDX` int, + `CHILD_TBL_ID` bigint, + `PARENT_CD_ID` bigint, + `PARENT_INTEGER_IDX` int, + `PARENT_TBL_ID` bigint, + `POSITION` bigint, + `CONSTRAINT_NAME` string, + `CONSTRAINT_TYPE` string, + `UPDATE_RULE` string, + `DELETE_RULE` string, + `ENABLE_VALIDATE_RELY` int, + CONSTRAINT `SYS_PK_KEY_CONSTRAINTS` PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CHILD_CD_ID\", + \"CHILD_INTEGER_IDX\", + \"CHILD_TBL_ID\", + \"PARENT_CD_ID\", + \"PARENT_INTEGER_IDX\", + \"PARENT_TBL_ID\", + \"POSITION\", + \"CONSTRAINT_NAME\", + \"CONSTRAINT_TYPE\", + \"UPDATE_RULE\", + \"DELETE_RULE\", + \"ENABLE_VALIDATE_RELY\" +FROM + \"KEY_CONSTRAINTS\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@KEY_CONSTRAINTS +POSTHOOK: Output: database:sys +PREHOOK: query: CREATE VIEW `TABLE_STATS_VIEW` AS +SELECT + `TBL_ID`, + max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS COLUMN_STATS_ACCURATE, + max(CASE `PARAM_KEY` WHEN 'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES, + max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS, + max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE, + max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) 
AS TOTAL_SIZE, +#### A masked pattern was here #### +FROM `TABLE_PARAMS` GROUP BY `TBL_ID` +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@table_params +PREHOOK: Output: SYS@TABLE_STATS_VIEW +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE VIEW `TABLE_STATS_VIEW` AS +SELECT + `TBL_ID`, + max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS COLUMN_STATS_ACCURATE, + max(CASE `PARAM_KEY` WHEN 'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES, + max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS, + max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE, + max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) AS TOTAL_SIZE, +#### A masked pattern was here #### +FROM `TABLE_PARAMS` GROUP BY `TBL_ID` +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@table_params +POSTHOOK: Output: SYS@TABLE_STATS_VIEW +POSTHOOK: Output: database:sys +POSTHOOK: Lineage: TABLE_STATS_VIEW.column_stats_accurate EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_STATS_VIEW.num_files EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_STATS_VIEW.num_rows EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_STATS_VIEW.raw_data_size EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_STATS_VIEW.tbl_id SIMPLE [(table_params)table_params.FieldSchema(name:tbl_id, type:bigint, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_STATS_VIEW.total_size EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_STATS_VIEW.transient_last_ddl_time EXPRESSION [(table_params)table_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (table_params)table_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +PREHOOK: query: CREATE VIEW `PARTITION_STATS_VIEW` AS +SELECT + `PART_ID`, + max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS COLUMN_STATS_ACCURATE, + max(CASE `PARAM_KEY` WHEN 'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES, + max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS, + max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE, + max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) AS TOTAL_SIZE, +#### A masked pattern was here #### +FROM `PARTITION_PARAMS` GROUP BY `PART_ID` +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@partition_params +PREHOOK: Output: SYS@PARTITION_STATS_VIEW +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE VIEW `PARTITION_STATS_VIEW` AS +SELECT + `PART_ID`, + max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS COLUMN_STATS_ACCURATE, + max(CASE `PARAM_KEY` WHEN 
'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES, + max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS, + max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE, + max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) AS TOTAL_SIZE, +#### A masked pattern was here #### +FROM `PARTITION_PARAMS` GROUP BY `PART_ID` +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@partition_params +POSTHOOK: Output: SYS@PARTITION_STATS_VIEW +POSTHOOK: Output: database:sys +POSTHOOK: Lineage: PARTITION_STATS_VIEW.column_stats_accurate EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: PARTITION_STATS_VIEW.num_files EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: PARTITION_STATS_VIEW.num_rows EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: PARTITION_STATS_VIEW.part_id SIMPLE [(partition_params)partition_params.FieldSchema(name:part_id, type:bigint, comment:from deserializer), ] +POSTHOOK: Lineage: PARTITION_STATS_VIEW.raw_data_size EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: PARTITION_STATS_VIEW.total_size EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: PARTITION_STATS_VIEW.transient_last_ddl_time EXPRESSION [(partition_params)partition_params.FieldSchema(name:param_key, type:string, comment:from deserializer), (partition_params)partition_params.FieldSchema(name:param_value, type:string, comment:from deserializer), ] +PREHOOK: query: CREATE TABLE IF NOT EXISTS `WM_RESOURCEPLANS` ( + `NAME` string, + `STATUS` string, + `QUERY_PARALLELISM` int +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"NAME\", + \"STATUS\", + \"QUERY_PARALLELISM\" +FROM + \"WM_RESOURCEPLAN\"" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: SYS@WM_RESOURCEPLANS +PREHOOK: Output: database:sys +POSTHOOK: query: CREATE TABLE IF NOT EXISTS `WM_RESOURCEPLANS` ( + `NAME` string, + `STATUS` string, + `QUERY_PARALLELISM` int +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"NAME\", + \"STATUS\", + \"QUERY_PARALLELISM\" +FROM + \"WM_RESOURCEPLAN\"" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: SYS@WM_RESOURCEPLANS +POSTHOOK: Output: database:sys +PREHOOK: query: DROP DATABASE IF EXISTS INFORMATION_SCHEMA +PREHOOK: type: DROPDATABASE +POSTHOOK: query: DROP DATABASE IF EXISTS INFORMATION_SCHEMA +POSTHOOK: type: DROPDATABASE +PREHOOK: query: CREATE DATABASE INFORMATION_SCHEMA +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: 
database:INFORMATION_SCHEMA +POSTHOOK: query: CREATE DATABASE INFORMATION_SCHEMA +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:INFORMATION_SCHEMA +PREHOOK: query: USE INFORMATION_SCHEMA +PREHOOK: type: SWITCHDATABASE +PREHOOK: Input: database:information_schema +POSTHOOK: query: USE INFORMATION_SCHEMA +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Input: database:information_schema +PREHOOK: query: CREATE VIEW IF NOT EXISTS `SCHEMATA` +( + `CATALOG_NAME`, + `SCHEMA_NAME`, + `SCHEMA_OWNER`, + `DEFAULT_CHARACTER_SET_CATALOG`, + `DEFAULT_CHARACTER_SET_SCHEMA`, + `DEFAULT_CHARACTER_SET_NAME`, + `SQL_PATH` +) AS +SELECT + 'default', + `NAME`, + `OWNER_NAME`, + cast(null as string), + cast(null as string), + cast(null as string), + `DB_LOCATION_URI` +FROM + sys.DBS +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@dbs +PREHOOK: Output: INFORMATION_SCHEMA@SCHEMATA +PREHOOK: Output: database:information_schema +POSTHOOK: query: CREATE VIEW IF NOT EXISTS `SCHEMATA` +( + `CATALOG_NAME`, + `SCHEMA_NAME`, + `SCHEMA_OWNER`, + `DEFAULT_CHARACTER_SET_CATALOG`, + `DEFAULT_CHARACTER_SET_SCHEMA`, + `DEFAULT_CHARACTER_SET_NAME`, + `SQL_PATH` +) AS +SELECT + 'default', + `NAME`, + `OWNER_NAME`, + cast(null as string), + cast(null as string), + cast(null as string), + `DB_LOCATION_URI` +FROM + sys.DBS +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@dbs +POSTHOOK: Output: INFORMATION_SCHEMA@SCHEMATA +POSTHOOK: Output: database:information_schema +POSTHOOK: Lineage: SCHEMATA.catalog_name SIMPLE [] +POSTHOOK: Lineage: SCHEMATA.default_character_set_catalog EXPRESSION [] +POSTHOOK: Lineage: SCHEMATA.default_character_set_name EXPRESSION [] +POSTHOOK: Lineage: SCHEMATA.default_character_set_schema EXPRESSION [] +POSTHOOK: Lineage: SCHEMATA.schema_name SIMPLE [(dbs)dbs.FieldSchema(name:name, type:string, comment:from deserializer), ] +#### A masked pattern was here #### +POSTHOOK: Lineage: SCHEMATA.sql_path SIMPLE [(dbs)dbs.FieldSchema(name:db_location_uri, type:string, comment:from deserializer), ] +PREHOOK: query: CREATE VIEW IF NOT EXISTS `TABLES` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `TABLE_TYPE`, + `SELF_REFERENCING_COLUMN_NAME`, + `REFERENCE_GENERATION`, + `USER_DEFINED_TYPE_CATALOG`, + `USER_DEFINED_TYPE_SCHEMA`, + `USER_DEFINED_TYPE_NAME`, + `IS_INSERTABLE_INTO`, + `IS_TYPED`, + `COMMIT_ACTION` +) AS +SELECT + 'default', + D.NAME, + T.TBL_NAME, + IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'VIEW', 'BASE_TABLE'), + cast(null as string), + cast(null as string), + cast(null as string), + cast(null as string), + cast(null as string), + IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'NO', 'YES'), + 'NO', + cast(null as string) +FROM + `sys`.`TBLS` T, `sys`.`DBS` D +WHERE + D.`DB_ID` = T.`DB_ID` +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@dbs +PREHOOK: Input: sys@tbls +PREHOOK: Output: INFORMATION_SCHEMA@TABLES +PREHOOK: Output: database:information_schema +POSTHOOK: query: CREATE VIEW IF NOT EXISTS `TABLES` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `TABLE_TYPE`, + `SELF_REFERENCING_COLUMN_NAME`, + `REFERENCE_GENERATION`, + `USER_DEFINED_TYPE_CATALOG`, + `USER_DEFINED_TYPE_SCHEMA`, + `USER_DEFINED_TYPE_NAME`, + `IS_INSERTABLE_INTO`, + `IS_TYPED`, + `COMMIT_ACTION` +) AS +SELECT + 'default', + D.NAME, + T.TBL_NAME, + IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'VIEW', 'BASE_TABLE'), + cast(null as string), + cast(null as string), + cast(null as string), + cast(null as string), + cast(null as string), + IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'NO', 'YES'), + 'NO', + cast(null as string) +FROM 
+ `sys`.`TBLS` T, `sys`.`DBS` D +WHERE + D.`DB_ID` = T.`DB_ID` +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@dbs +POSTHOOK: Input: sys@tbls +POSTHOOK: Output: INFORMATION_SCHEMA@TABLES +POSTHOOK: Output: database:information_schema +POSTHOOK: Lineage: TABLES.commit_action EXPRESSION [] +POSTHOOK: Lineage: TABLES.is_insertable_into EXPRESSION [(tbls)t.FieldSchema(name:view_original_text, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLES.is_typed SIMPLE [] +POSTHOOK: Lineage: TABLES.reference_generation EXPRESSION [] +POSTHOOK: Lineage: TABLES.self_referencing_column_name EXPRESSION [] +POSTHOOK: Lineage: TABLES.table_catalog SIMPLE [] +POSTHOOK: Lineage: TABLES.table_name SIMPLE [(tbls)t.FieldSchema(name:tbl_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLES.table_schema SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLES.table_type EXPRESSION [(tbls)t.FieldSchema(name:view_original_text, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLES.user_defined_type_catalog EXPRESSION [] +POSTHOOK: Lineage: TABLES.user_defined_type_name EXPRESSION [] +POSTHOOK: Lineage: TABLES.user_defined_type_schema EXPRESSION [] +PREHOOK: query: CREATE VIEW IF NOT EXISTS `TABLE_PRIVILEGES` +( + `GRANTOR`, + `GRANTEE`, + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `PRIVILEGE_TYPE`, + `IS_GRANTABLE`, + `WITH_HIERARCHY` +) AS +SELECT + `GRANTOR`, + `PRINCIPAL_NAME`, + 'default', + D.`NAME`, + T.`TBL_NAME`, + P.`TBL_PRIV`, + IF (P.`GRANT_OPTION` == 0, 'NO', 'YES'), + 'NO' +FROM + sys.`TBL_PRIVS` P, + sys.`TBLS` T, + sys.`DBS` D +WHERE + P.TBL_ID = T.TBL_ID + AND T.DB_ID = D.DB_ID +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@dbs +PREHOOK: Input: sys@tbl_privs +PREHOOK: Input: sys@tbls +PREHOOK: Output: INFORMATION_SCHEMA@TABLE_PRIVILEGES +PREHOOK: Output: database:information_schema +POSTHOOK: query: CREATE VIEW IF NOT EXISTS `TABLE_PRIVILEGES` +( + `GRANTOR`, + `GRANTEE`, + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `PRIVILEGE_TYPE`, + `IS_GRANTABLE`, + `WITH_HIERARCHY` +) AS +SELECT + `GRANTOR`, + `PRINCIPAL_NAME`, + 'default', + D.`NAME`, + T.`TBL_NAME`, + P.`TBL_PRIV`, + IF (P.`GRANT_OPTION` == 0, 'NO', 'YES'), + 'NO' +FROM + sys.`TBL_PRIVS` P, + sys.`TBLS` T, + sys.`DBS` D +WHERE + P.TBL_ID = T.TBL_ID + AND T.DB_ID = D.DB_ID +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@dbs +POSTHOOK: Input: sys@tbl_privs +POSTHOOK: Input: sys@tbls +POSTHOOK: Output: INFORMATION_SCHEMA@TABLE_PRIVILEGES +POSTHOOK: Output: database:information_schema +POSTHOOK: Lineage: TABLE_PRIVILEGES.grantee SIMPLE [(tbl_privs)p.FieldSchema(name:principal_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_PRIVILEGES.grantor SIMPLE [(tbl_privs)p.FieldSchema(name:grantor, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_PRIVILEGES.is_grantable EXPRESSION [(tbl_privs)p.FieldSchema(name:grant_option, type:int, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_PRIVILEGES.privilege_type SIMPLE [(tbl_privs)p.FieldSchema(name:tbl_priv, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_PRIVILEGES.table_catalog SIMPLE [] +POSTHOOK: Lineage: TABLE_PRIVILEGES.table_name SIMPLE [(tbls)t.FieldSchema(name:tbl_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_PRIVILEGES.table_schema SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: TABLE_PRIVILEGES.with_hierarchy SIMPLE [] 
+PREHOOK: query: CREATE VIEW IF NOT EXISTS `COLUMNS` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `COLUMN_NAME`, + `ORDINAL_POSITION`, + `COLUMN_DEFAULT`, + `IS_NULLABLE`, + `DATA_TYPE`, + `CHARACTER_MAXIMUM_LENGTH`, + `CHARACTER_OCTET_LENGTH`, + `NUMERIC_PRECISION`, + `NUMERIC_PRECISION_RADIX`, + `NUMERIC_SCALE`, + `DATETIME_PRECISION`, + `INTERVAL_TYPE`, + `INTERVAL_PRECISION`, + `CHARACTER_SET_CATALOG`, + `CHARACTER_SET_SCHEMA`, + `CHARACTER_SET_NAME`, + `COLLATION_CATALOG`, + `COLLATION_SCHEMA`, + `COLLATION_NAME`, + `UDT_CATALOG`, + `UDT_SCHEMA`, + `UDT_NAME`, + `SCOPE_CATALOG`, + `SCOPE_SCHEMA`, + `SCOPE_NAME`, + `MAXIMUM_CARDINALITY`, + `DTD_IDENTIFIER`, + `IS_SELF_REFERENCING`, + `IS_IDENTITY`, + `IDENTITY_GENERATION`, + `IDENTITY_START`, + `IDENTITY_INCREMENT`, + `IDENTITY_MAXIMUM`, + `IDENTITY_MINIMUM`, + `IDENTITY_CYCLE`, + `IS_GENERATED`, + `GENERATION_EXPRESSION`, + `IS_SYSTEM_TIME_PERIOD_START`, + `IS_SYSTEM_TIME_PERIOD_END`, + `SYSTEM_TIME_PERIOD_TIMESTAMP_GENERATION`, + `IS_UPDATABLE`, + `DECLARED_DATA_TYPE`, + `DECLARED_NUMERIC_PRECISION`, + `DECLARED_NUMERIC_SCALE` +) AS +SELECT + 'default', + D.NAME, + T.TBL_NAME, + C.COLUMN_NAME, + C.INTEGER_IDX, + cast (null as string), + 'YES', + C.TYPE_NAME as TYPE_NAME, + CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 + WHEN lower(C.TYPE_NAME) = 'float' THEN 23 + WHEN lower(C.TYPE_NAME) = 'double' THEN 53 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'float' THEN 2 + WHEN lower(C.TYPE_NAME) = 'double' THEN 2 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+),(\\d+)',2) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+),(\\d+)',2) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'date' THEN 0 + WHEN lower(C.TYPE_NAME) = 'timestamp' THEN 9 + ELSE null END, + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + C.CD_ID, + 'NO', + 'NO', + cast (null as string), + cast (null as string), + cast (null as string), 
+ cast (null as string), + cast (null as string), + cast (null as string), + 'NEVER', + cast (null as string), + 'NO', + 'NO', + cast (null as string), + 'YES', + C.TYPE_NAME as DECLARED_DATA_TYPE, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 + WHEN lower(C.TYPE_NAME) = 'float' THEN 23 + WHEN lower(C.TYPE_NAME) = 'double' THEN 53 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'float' THEN 2 + WHEN lower(C.TYPE_NAME) = 'double' THEN 2 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 + ELSE null END +FROM + sys.`COLUMNS_V2` C, + sys.`SDS` S, + sys.`TBLS` T, + sys.`DBS` D +WHERE + S.`SD_ID` = T.`SD_ID` + AND T.`DB_ID` = D.`DB_ID` + AND C.`CD_ID` = S.`CD_ID` +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@columns_v2 +PREHOOK: Input: sys@dbs +PREHOOK: Input: sys@sds +PREHOOK: Input: sys@tbls +PREHOOK: Output: INFORMATION_SCHEMA@COLUMNS +PREHOOK: Output: database:information_schema +POSTHOOK: query: CREATE VIEW IF NOT EXISTS `COLUMNS` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `COLUMN_NAME`, + `ORDINAL_POSITION`, + `COLUMN_DEFAULT`, + `IS_NULLABLE`, + `DATA_TYPE`, + `CHARACTER_MAXIMUM_LENGTH`, + `CHARACTER_OCTET_LENGTH`, + `NUMERIC_PRECISION`, + `NUMERIC_PRECISION_RADIX`, + `NUMERIC_SCALE`, + `DATETIME_PRECISION`, + `INTERVAL_TYPE`, + `INTERVAL_PRECISION`, + `CHARACTER_SET_CATALOG`, + `CHARACTER_SET_SCHEMA`, + `CHARACTER_SET_NAME`, + `COLLATION_CATALOG`, + `COLLATION_SCHEMA`, + `COLLATION_NAME`, + `UDT_CATALOG`, + `UDT_SCHEMA`, + `UDT_NAME`, + `SCOPE_CATALOG`, + `SCOPE_SCHEMA`, + `SCOPE_NAME`, + `MAXIMUM_CARDINALITY`, + `DTD_IDENTIFIER`, + `IS_SELF_REFERENCING`, + `IS_IDENTITY`, + `IDENTITY_GENERATION`, + `IDENTITY_START`, + `IDENTITY_INCREMENT`, + `IDENTITY_MAXIMUM`, + `IDENTITY_MINIMUM`, + `IDENTITY_CYCLE`, + `IS_GENERATED`, + `GENERATION_EXPRESSION`, + `IS_SYSTEM_TIME_PERIOD_START`, + `IS_SYSTEM_TIME_PERIOD_END`, + `SYSTEM_TIME_PERIOD_TIMESTAMP_GENERATION`, + `IS_UPDATABLE`, + `DECLARED_DATA_TYPE`, + `DECLARED_NUMERIC_PRECISION`, + `DECLARED_NUMERIC_SCALE` +) AS +SELECT + 'default', + D.NAME, + T.TBL_NAME, + C.COLUMN_NAME, + C.INTEGER_IDX, + cast (null as string), + 'YES', + C.TYPE_NAME as TYPE_NAME, + CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 + WHEN lower(C.TYPE_NAME) = 'float' THEN 23 + WHEN lower(C.TYPE_NAME) = 
'double' THEN 53 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'float' THEN 2 + WHEN lower(C.TYPE_NAME) = 'double' THEN 2 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+),(\\d+)',2) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+),(\\d+)',2) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'date' THEN 0 + WHEN lower(C.TYPE_NAME) = 'timestamp' THEN 9 + ELSE null END, + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + C.CD_ID, + 'NO', + 'NO', + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + 'NEVER', + cast (null as string), + 'NO', + 'NO', + cast (null as string), + 'YES', + C.TYPE_NAME as DECLARED_DATA_TYPE, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 + WHEN lower(C.TYPE_NAME) = 'float' THEN 23 + WHEN lower(C.TYPE_NAME) = 'double' THEN 53 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'float' THEN 2 + WHEN lower(C.TYPE_NAME) = 'double' THEN 2 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 + ELSE null END +FROM + sys.`COLUMNS_V2` C, + sys.`SDS` S, + sys.`TBLS` T, + sys.`DBS` D +WHERE + S.`SD_ID` = T.`SD_ID` + AND T.`DB_ID` = D.`DB_ID` + AND C.`CD_ID` = S.`CD_ID` +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@columns_v2 +POSTHOOK: Input: sys@dbs +POSTHOOK: Input: sys@sds +POSTHOOK: Input: sys@tbls +POSTHOOK: Output: INFORMATION_SCHEMA@COLUMNS +POSTHOOK: Output: database:information_schema +POSTHOOK: Lineage: COLUMNS.character_maximum_length EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.character_octet_length EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.character_set_catalog EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.character_set_name EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.character_set_schema EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.collation_catalog EXPRESSION [] +POSTHOOK: Lineage: 
COLUMNS.collation_name EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.collation_schema EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.column_default EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.column_name SIMPLE [(columns_v2)c.FieldSchema(name:column_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.data_type SIMPLE [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.datetime_precision EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.declared_data_type SIMPLE [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.declared_numeric_precision EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.declared_numeric_scale EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.dtd_identifier SIMPLE [(columns_v2)c.FieldSchema(name:cd_id, type:bigint, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.generation_expression EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.identity_cycle EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.identity_generation EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.identity_increment EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.identity_maximum EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.identity_minimum EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.identity_start EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.interval_precision EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.interval_type EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.is_generated SIMPLE [] +POSTHOOK: Lineage: COLUMNS.is_identity SIMPLE [] +POSTHOOK: Lineage: COLUMNS.is_nullable SIMPLE [] +POSTHOOK: Lineage: COLUMNS.is_self_referencing SIMPLE [] +POSTHOOK: Lineage: COLUMNS.is_system_time_period_end SIMPLE [] +POSTHOOK: Lineage: COLUMNS.is_system_time_period_start SIMPLE [] +POSTHOOK: Lineage: COLUMNS.is_updatable SIMPLE [] +POSTHOOK: Lineage: COLUMNS.maximum_cardinality EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.numeric_precision EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.numeric_precision_radix EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.numeric_scale EXPRESSION [(columns_v2)c.FieldSchema(name:type_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.ordinal_position SIMPLE [(columns_v2)c.FieldSchema(name:integer_idx, type:int, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.scope_catalog EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.scope_name EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.scope_schema EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.system_time_period_timestamp_generation EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.table_catalog SIMPLE [] +POSTHOOK: Lineage: COLUMNS.table_name SIMPLE [(tbls)t.FieldSchema(name:tbl_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.table_schema SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMNS.udt_catalog EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.udt_name EXPRESSION [] +POSTHOOK: Lineage: COLUMNS.udt_schema EXPRESSION [] +PREHOOK: query: CREATE VIEW IF NOT EXISTS `COLUMN_PRIVILEGES` +( + `GRANTOR`, + `GRANTEE`, + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + 
`COLUMN_NAME`, + `PRIVILEGE_TYPE`, + `IS_GRANTABLE` +) AS +SELECT + `GRANTOR`, + `PRINCIPAL_NAME`, + 'default', + D.`NAME`, + T.`TBL_NAME`, + C.`COLUMN_NAME`, + P.`TBL_COL_PRIV`, + IF (P.`GRANT_OPTION` == 0, 'NO', 'YES') +FROM + sys.`TBL_COL_PRIVS` P, + sys.`TBLS` T, + sys.`DBS` D, + sys.`COLUMNS_V2` C, + sys.`SDS` S +WHERE + S.`SD_ID` = T.`SD_ID` + AND T.`DB_ID` = D.`DB_ID` + AND P.`TBL_ID` = T.`TBL_ID` + AND P.`COLUMN_NAME` = C.`COLUMN_NAME` + AND C.`CD_ID` = S.`CD_ID` +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@columns_v2 +PREHOOK: Input: sys@dbs +PREHOOK: Input: sys@sds +PREHOOK: Input: sys@tbl_col_privs +PREHOOK: Input: sys@tbls +PREHOOK: Output: INFORMATION_SCHEMA@COLUMN_PRIVILEGES +PREHOOK: Output: database:information_schema +POSTHOOK: query: CREATE VIEW IF NOT EXISTS `COLUMN_PRIVILEGES` +( + `GRANTOR`, + `GRANTEE`, + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `COLUMN_NAME`, + `PRIVILEGE_TYPE`, + `IS_GRANTABLE` +) AS +SELECT + `GRANTOR`, + `PRINCIPAL_NAME`, + 'default', + D.`NAME`, + T.`TBL_NAME`, + C.`COLUMN_NAME`, + P.`TBL_COL_PRIV`, + IF (P.`GRANT_OPTION` == 0, 'NO', 'YES') +FROM + sys.`TBL_COL_PRIVS` P, + sys.`TBLS` T, + sys.`DBS` D, + sys.`COLUMNS_V2` C, + sys.`SDS` S +WHERE + S.`SD_ID` = T.`SD_ID` + AND T.`DB_ID` = D.`DB_ID` + AND P.`TBL_ID` = T.`TBL_ID` + AND P.`COLUMN_NAME` = C.`COLUMN_NAME` + AND C.`CD_ID` = S.`CD_ID` +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@columns_v2 +POSTHOOK: Input: sys@dbs +POSTHOOK: Input: sys@sds +POSTHOOK: Input: sys@tbl_col_privs +POSTHOOK: Input: sys@tbls +POSTHOOK: Output: INFORMATION_SCHEMA@COLUMN_PRIVILEGES +POSTHOOK: Output: database:information_schema +POSTHOOK: Lineage: COLUMN_PRIVILEGES.column_name SIMPLE [(columns_v2)c.FieldSchema(name:column_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMN_PRIVILEGES.grantee SIMPLE [(tbl_col_privs)p.FieldSchema(name:principal_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMN_PRIVILEGES.grantor SIMPLE [(tbl_col_privs)p.FieldSchema(name:grantor, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMN_PRIVILEGES.is_grantable EXPRESSION [(tbl_col_privs)p.FieldSchema(name:grant_option, type:int, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMN_PRIVILEGES.privilege_type SIMPLE [(tbl_col_privs)p.FieldSchema(name:tbl_col_priv, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMN_PRIVILEGES.table_catalog SIMPLE [] +POSTHOOK: Lineage: COLUMN_PRIVILEGES.table_name SIMPLE [(tbls)t.FieldSchema(name:tbl_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: COLUMN_PRIVILEGES.table_schema SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ] +PREHOOK: query: CREATE VIEW IF NOT EXISTS `VIEWS` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `VIEW_DEFINITION`, + `CHECK_OPTION`, + `IS_UPDATABLE`, + `IS_INSERTABLE_INTO`, + `IS_TRIGGER_UPDATABLE`, + `IS_TRIGGER_DELETABLE`, + `IS_TRIGGER_INSERTABLE_INTO` +) AS +SELECT + 'default', + D.NAME, + T.TBL_NAME, + T.VIEW_ORIGINAL_TEXT, + CAST(NULL as string), + false, + false, + false, + false, + false +FROM + `sys`.`DBS` D, + `sys`.`TBLS` T +WHERE + D.`DB_ID` = T.`DB_ID` AND + length(T.VIEW_ORIGINAL_TEXT) > 0 +PREHOOK: type: CREATEVIEW +PREHOOK: Input: sys@dbs +PREHOOK: Input: sys@tbls +PREHOOK: Output: INFORMATION_SCHEMA@VIEWS +PREHOOK: Output: database:information_schema +POSTHOOK: query: CREATE VIEW IF NOT EXISTS `VIEWS` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `VIEW_DEFINITION`, + `CHECK_OPTION`, + 
`IS_UPDATABLE`, + `IS_INSERTABLE_INTO`, + `IS_TRIGGER_UPDATABLE`, + `IS_TRIGGER_DELETABLE`, + `IS_TRIGGER_INSERTABLE_INTO` +) AS +SELECT + 'default', + D.NAME, + T.TBL_NAME, + T.VIEW_ORIGINAL_TEXT, + CAST(NULL as string), + false, + false, + false, + false, + false +FROM + `sys`.`DBS` D, + `sys`.`TBLS` T +WHERE + D.`DB_ID` = T.`DB_ID` AND + length(T.VIEW_ORIGINAL_TEXT) > 0 +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: sys@dbs +POSTHOOK: Input: sys@tbls +POSTHOOK: Output: INFORMATION_SCHEMA@VIEWS +POSTHOOK: Output: database:information_schema +POSTHOOK: Lineage: VIEWS.check_option EXPRESSION [] +POSTHOOK: Lineage: VIEWS.is_insertable_into SIMPLE [] +POSTHOOK: Lineage: VIEWS.is_trigger_deletable SIMPLE [] +POSTHOOK: Lineage: VIEWS.is_trigger_insertable_into SIMPLE [] +POSTHOOK: Lineage: VIEWS.is_trigger_updatable SIMPLE [] +POSTHOOK: Lineage: VIEWS.is_updatable SIMPLE [] +POSTHOOK: Lineage: VIEWS.table_catalog SIMPLE [] +POSTHOOK: Lineage: VIEWS.table_name SIMPLE [(tbls)t.FieldSchema(name:tbl_name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: VIEWS.table_schema SIMPLE [(dbs)d.FieldSchema(name:name, type:string, comment:from deserializer), ] +POSTHOOK: Lineage: VIEWS.view_definition SIMPLE [(tbls)t.FieldSchema(name:view_original_text, type:string, comment:from deserializer), ] +PREHOOK: query: SHOW RESOURCE PLANS +PREHOOK: type: SHOW_RESOURCEPLAN +POSTHOOK: query: SHOW RESOURCE PLANS +POSTHOOK: type: SHOW_RESOURCEPLAN +PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +PREHOOK: query: CREATE RESOURCE PLAN plan_1 +PREHOOK: type: CREATE_RESOURCEPLAN +POSTHOOK: query: CREATE RESOURCE PLAN plan_1 +POSTHOOK: type: CREATE_RESOURCEPLAN +PREHOOK: query: SHOW RESOURCE PLANS +PREHOOK: type: SHOW_RESOURCEPLAN +POSTHOOK: query: SHOW RESOURCE PLANS +POSTHOOK: type: SHOW_RESOURCEPLAN +plan_1 null +PREHOOK: query: SHOW RESOURCE PLAN plan_1 +PREHOOK: type: SHOW_RESOURCEPLAN +POSTHOOK: query: SHOW RESOURCE PLAN plan_1 +POSTHOOK: type: SHOW_RESOURCEPLAN +plan_1 null +PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +plan_1 DISABLED NULL +PREHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM 10 +PREHOOK: type: CREATE_RESOURCEPLAN +POSTHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM 10 +POSTHOOK: type: CREATE_RESOURCEPLAN +PREHOOK: query: SHOW RESOURCE PLANS +PREHOOK: type: SHOW_RESOURCEPLAN +POSTHOOK: query: SHOW RESOURCE PLANS +POSTHOOK: type: SHOW_RESOURCEPLAN +plan_1 null +plan_2 10 +PREHOOK: query: SHOW RESOURCE PLAN plan_2 +PREHOOK: type: SHOW_RESOURCEPLAN +POSTHOOK: query: SHOW RESOURCE PLAN plan_2 +POSTHOOK: type: SHOW_RESOURCEPLAN +plan_2 10 +PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +PREHOOK: type: QUERY +PREHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS +POSTHOOK: type: QUERY +POSTHOOK: Input: sys@wm_resourceplans +#### A masked pattern was here #### +plan_1 DISABLED NULL +plan_2 DISABLED 10 diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp 
standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index 878b6f0789..e5765adb3f 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -39970,6 +39970,1315 @@ uint32_t ThriftHiveMetastore_get_metastore_db_uuid_presult::read(::apache::thrif return xfer; } + +ThriftHiveMetastore_create_resource_plan_args::~ThriftHiveMetastore_create_resource_plan_args() throw() { +} + + +uint32_t ThriftHiveMetastore_create_resource_plan_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->resourcePlan.read(iprot); + this->__isset.resourcePlan = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_create_resource_plan_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_resource_plan_args"); + + xfer += oprot->writeFieldBegin("resourcePlan", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->resourcePlan.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_create_resource_plan_pargs::~ThriftHiveMetastore_create_resource_plan_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_create_resource_plan_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_resource_plan_pargs"); + + xfer += oprot->writeFieldBegin("resourcePlan", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->resourcePlan)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_create_resource_plan_result::~ThriftHiveMetastore_create_resource_plan_result() throw() { +} + + +uint32_t ThriftHiveMetastore_create_resource_plan_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + 
this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_create_resource_plan_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_resource_plan_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_create_resource_plan_presult::~ThriftHiveMetastore_create_resource_plan_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_create_resource_plan_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_resource_plan_args::~ThriftHiveMetastore_get_resource_plan_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_resource_plan_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->name); + this->__isset.name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_resource_plan_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_resource_plan_args"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + 
xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_resource_plan_pargs::~ThriftHiveMetastore_get_resource_plan_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_resource_plan_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_resource_plan_pargs"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_resource_plan_result::~ThriftHiveMetastore_get_resource_plan_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_resource_plan_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_resource_plan_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_resource_plan_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_resource_plan_presult::~ThriftHiveMetastore_get_resource_plan_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_resource_plan_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + 
break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_all_resource_plans_args::~ThriftHiveMetastore_get_all_resource_plans_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_all_resource_plans_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_all_resource_plans_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_resource_plans_args"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_all_resource_plans_pargs::~ThriftHiveMetastore_get_all_resource_plans_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_all_resource_plans_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_resource_plans_pargs"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_all_resource_plans_result::~ThriftHiveMetastore_get_all_resource_plans_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_all_resource_plans_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1535; + ::apache::thrift::protocol::TType _etype1538; + xfer += iprot->readListBegin(_etype1538, _size1535); + this->success.resize(_size1535); + uint32_t _i1539; + for (_i1539 = 0; _i1539 < _size1535; ++_i1539) + { + xfer += this->success[_i1539].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + 
break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_all_resource_plans_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_resource_plans_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); + std::vector<WMResourcePlan> ::const_iterator _iter1540; + for (_iter1540 = this->success.begin(); _iter1540 != this->success.end(); ++_iter1540) + { + xfer += (*_iter1540).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_all_resource_plans_presult::~ThriftHiveMetastore_get_all_resource_plans_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_all_resource_plans_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1541; + ::apache::thrift::protocol::TType _etype1544; + xfer += iprot->readListBegin(_etype1544, _size1541); + (*(this->success)).resize(_size1541); + uint32_t _i1545; + for (_i1545 = 0; _i1545 < _size1541; ++_i1545) + { + xfer += (*(this->success))[_i1545].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_alter_resource_plan_args::~ThriftHiveMetastore_alter_resource_plan_args() throw() { +} + + +uint32_t ThriftHiveMetastore_alter_resource_plan_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer +=
iprot->readString(this->name); + this->__isset.name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->resourcePlan.read(iprot); + this->__isset.resourcePlan = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_resource_plan_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_resource_plan_args"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("resourcePlan", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->resourcePlan.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_alter_resource_plan_pargs::~ThriftHiveMetastore_alter_resource_plan_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_alter_resource_plan_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_resource_plan_pargs"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("resourcePlan", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += (*(this->resourcePlan)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_alter_resource_plan_result::~ThriftHiveMetastore_alter_resource_plan_result() throw() { +} + + +uint32_t ThriftHiveMetastore_alter_resource_plan_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_resource_plan_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += 
oprot->writeStructBegin("ThriftHiveMetastore_alter_resource_plan_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_alter_resource_plan_presult::~ThriftHiveMetastore_alter_resource_plan_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_alter_resource_plan_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_validate_resource_plan_args::~ThriftHiveMetastore_validate_resource_plan_args() throw() { +} + + +uint32_t ThriftHiveMetastore_validate_resource_plan_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->name); + this->__isset.name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_validate_resource_plan_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_validate_resource_plan_args"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer 
+= oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_validate_resource_plan_pargs::~ThriftHiveMetastore_validate_resource_plan_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_validate_resource_plan_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_validate_resource_plan_pargs"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_validate_resource_plan_result::~ThriftHiveMetastore_validate_resource_plan_result() throw() { +} + + +uint32_t ThriftHiveMetastore_validate_resource_plan_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_validate_resource_plan_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_validate_resource_plan_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_validate_resource_plan_presult::~ThriftHiveMetastore_validate_resource_plan_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_validate_resource_plan_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = 
true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_drop_resource_plan_args::~ThriftHiveMetastore_drop_resource_plan_args() throw() { +} + + +uint32_t ThriftHiveMetastore_drop_resource_plan_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->name); + this->__isset.name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_drop_resource_plan_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_resource_plan_args"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_drop_resource_plan_pargs::~ThriftHiveMetastore_drop_resource_plan_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_drop_resource_plan_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_resource_plan_pargs"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_drop_resource_plan_result::~ThriftHiveMetastore_drop_resource_plan_result() throw() { +} + + +uint32_t ThriftHiveMetastore_drop_resource_plan_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return 
xfer; +} + +uint32_t ThriftHiveMetastore_drop_resource_plan_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_resource_plan_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_drop_resource_plan_presult::~ThriftHiveMetastore_drop_resource_plan_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_drop_resource_plan_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + void ThriftHiveMetastoreClient::getMetaConf(std::string& _return, const std::string& key) { send_getMetaConf(key); @@ -50209,6 +51518,370 @@ void ThriftHiveMetastoreClient::recv_get_metastore_db_uuid(std::string& _return) throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_metastore_db_uuid failed: unknown result"); } +void ThriftHiveMetastoreClient::create_resource_plan(const WMResourcePlan& resourcePlan) +{ + send_create_resource_plan(resourcePlan); + recv_create_resource_plan(); +} + +void ThriftHiveMetastoreClient::send_create_resource_plan(const WMResourcePlan& resourcePlan) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("create_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_create_resource_plan_pargs args; + args.resourcePlan = &resourcePlan; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_create_resource_plan() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("create_resource_plan") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + 
iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_create_resource_plan_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + return; +} + +void ThriftHiveMetastoreClient::get_resource_plan(WMResourcePlan& _return, const std::string& name) +{ + send_get_resource_plan(name); + recv_get_resource_plan(_return); +} + +void ThriftHiveMetastoreClient::send_get_resource_plan(const std::string& name) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_resource_plan_pargs args; + args.name = &name; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_resource_plan(WMResourcePlan& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_resource_plan") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_resource_plan_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_resource_plan failed: unknown result"); +} + +void ThriftHiveMetastoreClient::get_all_resource_plans(std::vector & _return) +{ + send_get_all_resource_plans(); + recv_get_all_resource_plans(_return); +} + +void ThriftHiveMetastoreClient::send_get_all_resource_plans() +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_all_resource_plans", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_all_resource_plans_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_all_resource_plans(std::vector & _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_all_resource_plans") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + 
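+  // The presult struct aliases the caller's output vector (result.success =
+  // &_return below), so the deserialized WMResourcePlan list is written in
+  // place rather than copied out of a temporary.
+  //
+  // A minimal caller-side sketch of this RPC (hypothetical host/port; the
+  // usual Thrift transport setup and generated namespace are assumed):
+  //
+  //   boost::shared_ptr<TSocket> socket(new TSocket("metastore-host", 9083));
+  //   boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
+  //   boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
+  //   ThriftHiveMetastoreClient client(protocol);
+  //   transport->open();
+  //   std::vector<WMResourcePlan> plans;
+  //   client.get_all_resource_plans(plans);  // fills `plans` in place
+  //   transport->close();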
ThriftHiveMetastore_get_all_resource_plans_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_resource_plans failed: unknown result"); +} + +void ThriftHiveMetastoreClient::alter_resource_plan(const std::string& name, const WMResourcePlan& resourcePlan) +{ + send_alter_resource_plan(name, resourcePlan); + recv_alter_resource_plan(); +} + +void ThriftHiveMetastoreClient::send_alter_resource_plan(const std::string& name, const WMResourcePlan& resourcePlan) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("alter_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_alter_resource_plan_pargs args; + args.name = &name; + args.resourcePlan = &resourcePlan; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_alter_resource_plan() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("alter_resource_plan") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_alter_resource_plan_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o2) { + throw result.o2; + } + return; +} + +void ThriftHiveMetastoreClient::validate_resource_plan(const std::string& name) +{ + send_validate_resource_plan(name); + recv_validate_resource_plan(); +} + +void ThriftHiveMetastoreClient::send_validate_resource_plan(const std::string& name) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("validate_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_validate_resource_plan_pargs args; + args.name = &name; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_validate_resource_plan() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("validate_resource_plan") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + 
iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_validate_resource_plan_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o3) { + throw result.o3; + } + return; +} + +void ThriftHiveMetastoreClient::drop_resource_plan(const std::string& name) +{ + send_drop_resource_plan(name); + recv_drop_resource_plan(); +} + +void ThriftHiveMetastoreClient::send_drop_resource_plan(const std::string& name) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("drop_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_drop_resource_plan_pargs args; + args.name = &name; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_drop_resource_plan() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_resource_plan") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_drop_resource_plan_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + return; +} + bool ThriftHiveMetastoreProcessor::dispatchCall(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, const std::string& fname, int32_t seqid, void* callContext) { ProcessMap::iterator pfn; pfn = processMap_.find(fname); @@ -59496,11 +61169,522 @@ void ThriftHiveMetastoreProcessor::process_get_file_metadata_by_expr(int32_t seq result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + } + + oprot->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* 
ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_file_metadata", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_file_metadata"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_file_metadata"); + } + + ThriftHiveMetastore_get_file_metadata_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_file_metadata", bytes); + } + + ThriftHiveMetastore_get_file_metadata_result result; + try { + iface_->get_file_metadata(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_file_metadata"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_file_metadata"); + } + + oprot->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_file_metadata", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_put_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.put_file_metadata", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.put_file_metadata"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.put_file_metadata"); + } + + ThriftHiveMetastore_put_file_metadata_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.put_file_metadata", bytes); + } + + ThriftHiveMetastore_put_file_metadata_result result; + try { + iface_->put_file_metadata(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.put_file_metadata"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.put_file_metadata"); + } + + oprot->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + 
oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.put_file_metadata", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_clear_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.clear_file_metadata", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.clear_file_metadata"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.clear_file_metadata"); + } + + ThriftHiveMetastore_clear_file_metadata_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.clear_file_metadata", bytes); + } + + ThriftHiveMetastore_clear_file_metadata_result result; + try { + iface_->clear_file_metadata(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.clear_file_metadata"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.clear_file_metadata"); + } + + oprot->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.clear_file_metadata", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_cache_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.cache_file_metadata", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.cache_file_metadata"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.cache_file_metadata"); + } + + ThriftHiveMetastore_cache_file_metadata_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.cache_file_metadata", bytes); + } + + ThriftHiveMetastore_cache_file_metadata_result result; + try { + iface_->cache_file_metadata(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.cache_file_metadata"); + } + + ::apache::thrift::TApplicationException x(e.what()); + 
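+    // Any exception not declared in the service IDL is wrapped in a
+    // TApplicationException and written back as a T_EXCEPTION message rather
+    // than a T_REPLY, so remote callers still receive a decodable error.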
oprot->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.cache_file_metadata"); + } + + oprot->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.cache_file_metadata", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_metastore_db_uuid(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_metastore_db_uuid", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_metastore_db_uuid"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_metastore_db_uuid"); + } + + ThriftHiveMetastore_get_metastore_db_uuid_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_metastore_db_uuid", bytes); + } + + ThriftHiveMetastore_get_metastore_db_uuid_result result; + try { + iface_->get_metastore_db_uuid(result.success); + result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_metastore_db_uuid"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_metastore_db_uuid", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_metastore_db_uuid"); + } + + oprot->writeMessageBegin("get_metastore_db_uuid", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_metastore_db_uuid", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_create_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_resource_plan", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_resource_plan"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_resource_plan"); + } + + ThriftHiveMetastore_create_resource_plan_args args; + args.read(iprot); + 
iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_resource_plan", bytes); + } + + ThriftHiveMetastore_create_resource_plan_result result; + try { + iface_->create_resource_plan(args.resourcePlan); + } catch (InvalidObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_resource_plan"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("create_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_resource_plan"); + } + + oprot->writeMessageBegin("create_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_resource_plan", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_resource_plan", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_resource_plan"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_resource_plan"); + } + + ThriftHiveMetastore_get_resource_plan_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_resource_plan", bytes); + } + + ThriftHiveMetastore_get_resource_plan_result result; + try { + iface_->get_resource_plan(result.success, args.name); + result.__isset.success = true; + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_resource_plan"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_resource_plan"); + } + + oprot->writeMessageBegin("get_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + 
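+    // preRead/postRead/preWrite/postWrite/handlerError are
+    // TProcessorEventHandler hooks; a server can install a handler to collect
+    // per-call latency and byte counts without modifying the generated code.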
this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_resource_plan", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_all_resource_plans(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_all_resource_plans", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_all_resource_plans"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_all_resource_plans"); + } + + ThriftHiveMetastore_get_all_resource_plans_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_all_resource_plans", bytes); + } + + ThriftHiveMetastore_get_all_resource_plans_result result; + try { + iface_->get_all_resource_plans(result.success); + result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_all_resource_plans"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_all_resource_plans", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_all_resource_plans"); + } + + oprot->writeMessageBegin("get_all_resource_plans", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_all_resource_plans", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_alter_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_resource_plan", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_resource_plan"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_resource_plan"); + } + + ThriftHiveMetastore_alter_resource_plan_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_resource_plan", bytes); + } + + ThriftHiveMetastore_alter_resource_plan_result result; + try { + iface_->alter_resource_plan(args.name, args.resourcePlan); + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidOperationException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if 
(this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_resource_plan"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("alter_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59509,52 +61693,57 @@ void ThriftHiveMetastoreProcessor::process_get_file_metadata_by_expr(int32_t seq } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_resource_plan"); } - oprot->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("alter_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_resource_plan", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_validate_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_file_metadata", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.validate_resource_plan", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_file_metadata"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.validate_resource_plan"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_file_metadata"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.validate_resource_plan"); } - ThriftHiveMetastore_get_file_metadata_args args; + ThriftHiveMetastore_validate_resource_plan_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_file_metadata", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.validate_resource_plan", bytes); } - ThriftHiveMetastore_get_file_metadata_result result; + ThriftHiveMetastore_validate_resource_plan_result result; try { - iface_->get_file_metadata(result.success, args.req); - result.__isset.success = true; + iface_->validate_resource_plan(args.name); + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_file_metadata"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.validate_resource_plan"); } ::apache::thrift::TApplicationException 
x(e.what()); - oprot->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("validate_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59563,52 +61752,57 @@ void ThriftHiveMetastoreProcessor::process_get_file_metadata(int32_t seqid, ::ap } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_file_metadata"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.validate_resource_plan"); } - oprot->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("validate_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_file_metadata", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.validate_resource_plan", bytes); } } -void ThriftHiveMetastoreProcessor::process_put_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_drop_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.put_file_metadata", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_resource_plan", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.put_file_metadata"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_resource_plan"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.put_file_metadata"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_resource_plan"); } - ThriftHiveMetastore_put_file_metadata_args args; + ThriftHiveMetastore_drop_resource_plan_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.put_file_metadata", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_resource_plan", bytes); } - ThriftHiveMetastore_put_file_metadata_result result; + ThriftHiveMetastore_drop_resource_plan_result result; try { - iface_->put_file_metadata(result.success, args.req); - result.__isset.success = true; + iface_->drop_resource_plan(args.name); + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.put_file_metadata"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_resource_plan"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("drop_resource_plan", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); 
oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -59616,207 +61810,749 @@ void ThriftHiveMetastoreProcessor::process_put_file_metadata(int32_t seqid, ::ap return; } - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.put_file_metadata"); - } + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_resource_plan"); + } + + oprot->writeMessageBegin("drop_resource_plan", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_resource_plan", bytes); + } +} + +::boost::shared_ptr< ::apache::thrift::TProcessor > ThriftHiveMetastoreProcessorFactory::getProcessor(const ::apache::thrift::TConnectionInfo& connInfo) { + ::apache::thrift::ReleaseHandler< ThriftHiveMetastoreIfFactory > cleanup(handlerFactory_); + ::boost::shared_ptr< ThriftHiveMetastoreIf > handler(handlerFactory_->getHandler(connInfo), cleanup); + ::boost::shared_ptr< ::apache::thrift::TProcessor > processor(new ThriftHiveMetastoreProcessor(handler)); + return processor; +} + +void ThriftHiveMetastoreConcurrentClient::getMetaConf(std::string& _return, const std::string& key) +{ + int32_t seqid = send_getMetaConf(key); + recv_getMetaConf(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_getMetaConf(const std::string& key) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("getMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_getMetaConf_pargs args; + args.key = &key; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("getMetaConf") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_getMetaConf_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + 
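+      // commit() marks this seqid's response as fully consumed; per the notes
+      // above, the sentry's destructor then releases the read mutex and wakes
+      // sibling client threads blocked in waitForWork().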
return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getMetaConf failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::setMetaConf(const std::string& key, const std::string& value) +{ + int32_t seqid = send_setMetaConf(key, value); + recv_setMetaConf(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_setMetaConf(const std::string& key, const std::string& value) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("setMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_setMetaConf_pargs args; + args.key = &key; + args.value = &value; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("setMetaConf") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_setMetaConf_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::create_database(const Database& database) +{ + int32_t seqid = send_create_database(database); + recv_create_database(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_create_database(const Database& database) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("create_database", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_create_database_pargs args; + args.database = &database; + 
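+  // pargs structs merely borrow pointers to the caller's arguments, so the
+  // write below serializes the Database object without copying it.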
args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("create_database") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_create_database_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::get_database(Database& _return, const std::string& name) +{ + int32_t seqid = send_get_database(name); + recv_get_database(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_database(const std::string& name) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_database", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_database_pargs args; + args.name = &name; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if 
(mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_database") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_database_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_database failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::drop_database(const std::string& name, const bool deleteData, const bool cascade) +{ + int32_t seqid = send_drop_database(name, deleteData, cascade); + recv_drop_database(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_database(const std::string& name, const bool deleteData, const bool cascade) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_drop_database_pargs args; + args.name = &name; + args.deleteData = &deleteData; + args.cascade = &cascade; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_database") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_drop_database_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + 
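+      // Declared exceptions are rethrown in field order (o1, o2, o3); a void
+      // RPC that set none of them just commits the sentry and returns.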
iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::get_databases(std::vector & _return, const std::string& pattern) +{ + int32_t seqid = send_get_databases(pattern); + recv_get_databases(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_databases(const std::string& pattern) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_databases", ::apache::thrift::protocol::T_CALL, cseqid); - oprot->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); + ThriftHiveMetastore_get_databases_pargs args; + args.pattern = &pattern; + args.write(oprot_); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.put_file_metadata", bytes); - } + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; } -void ThriftHiveMetastoreProcessor::process_clear_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vector & _return, const int32_t seqid) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.clear_file_metadata", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.clear_file_metadata"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.clear_file_metadata"); - } - ThriftHiveMetastore_clear_file_metadata_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.clear_file_metadata", bytes); - } + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - ThriftHiveMetastore_clear_file_metadata_result result; - try { - iface_->clear_file_metadata(result.success, args.req); - result.__isset.success = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.clear_file_metadata"); + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + 
iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_databases") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_databases_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.clear_file_metadata"); - } + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_databases failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); - oprot->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.clear_file_metadata", bytes); - } +void ThriftHiveMetastoreConcurrentClient::get_all_databases(std::vector & _return) +{ + int32_t seqid = send_get_all_databases(); + recv_get_all_databases(_return, seqid); } -void ThriftHiveMetastoreProcessor::process_cache_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_databases() { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.cache_file_metadata", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.cache_file_metadata"); + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_all_databases", ::apache::thrift::protocol::T_CALL, cseqid); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.cache_file_metadata"); - } + ThriftHiveMetastore_get_all_databases_pargs args; + args.write(oprot_); - ThriftHiveMetastore_cache_file_metadata_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); - if (this->eventHandler_.get() != NULL) { - 
this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.cache_file_metadata", bytes); - } + sentry.commit(); + return cseqid; +} - ThriftHiveMetastore_cache_file_metadata_result result; - try { - iface_->cache_file_metadata(result.success, args.req); - result.__isset.success = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.cache_file_metadata"); +void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vector & _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_all_databases") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_all_databases_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.cache_file_metadata"); - } + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_databases failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); - oprot->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.cache_file_metadata", bytes); - } +void ThriftHiveMetastoreConcurrentClient::alter_database(const std::string& dbname, const Database& db) +{ + int32_t seqid = send_alter_database(dbname, db); + recv_alter_database(seqid); } -void ThriftHiveMetastoreProcessor::process_get_metastore_db_uuid(int32_t seqid, 

-void ThriftHiveMetastoreProcessor::process_get_metastore_db_uuid(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+int32_t ThriftHiveMetastoreConcurrentClient::send_alter_database(const std::string& dbname, const Database& db)
 {
-  void* ctx = NULL;
-  if (this->eventHandler_.get() != NULL) {
-    ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_metastore_db_uuid", callContext);
-  }
-  ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_metastore_db_uuid");
+  int32_t cseqid = this->sync_.generateSeqId();
+  ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
+  oprot_->writeMessageBegin("alter_database", ::apache::thrift::protocol::T_CALL, cseqid);
-  if (this->eventHandler_.get() != NULL) {
-    this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_metastore_db_uuid");
-  }
+  ThriftHiveMetastore_alter_database_pargs args;
+  args.dbname = &dbname;
+  args.db = &db;
+  args.write(oprot_);
-  ThriftHiveMetastore_get_metastore_db_uuid_args args;
-  args.read(iprot);
-  iprot->readMessageEnd();
-  uint32_t bytes = iprot->getTransport()->readEnd();
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
-  if (this->eventHandler_.get() != NULL) {
-    this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_metastore_db_uuid", bytes);
-  }
+  sentry.commit();
+  return cseqid;
+}
-  ThriftHiveMetastore_get_metastore_db_uuid_result result;
-  try {
-    iface_->get_metastore_db_uuid(result.success);
-    result.__isset.success = true;
-  } catch (MetaException &o1) {
-    result.o1 = o1;
-    result.__isset.o1 = true;
-  } catch (const std::exception& e) {
-    if (this->eventHandler_.get() != NULL) {
-      this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_metastore_db_uuid");
-    }
+void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqid)
+{
-    ::apache::thrift::TApplicationException x(e.what());
-    oprot->writeMessageBegin("get_metastore_db_uuid", ::apache::thrift::protocol::T_EXCEPTION, seqid);
-    x.write(oprot);
-    oprot->writeMessageEnd();
-    oprot->getTransport()->writeEnd();
-    oprot->getTransport()->flush();
-    return;
-  }
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
-  if (this->eventHandler_.get() != NULL) {
-    this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_metastore_db_uuid");
-  }
+  // the read mutex gets dropped and reacquired as part of waitForWork()
+  // The destructor of this sentry wakes up other clients
+  ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);
-  oprot->writeMessageBegin("get_metastore_db_uuid", ::apache::thrift::protocol::T_REPLY, seqid);
-  result.write(oprot);
-  oprot->writeMessageEnd();
-  bytes = oprot->getTransport()->writeEnd();
-  oprot->getTransport()->flush();
+  while(true) {
+    if(!this->sync_.getPending(fname, mtype, rseqid)) {
+      iprot_->readMessageBegin(fname, mtype, rseqid);
+    }
+    if(seqid == rseqid) {
+      if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+        ::apache::thrift::TApplicationException x;
+        x.read(iprot_);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+        sentry.commit();
+        throw x;
+      }
+      if (mtype != ::apache::thrift::protocol::T_REPLY) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+      }
+      if (fname.compare("alter_database") != 0) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
-  if (this->eventHandler_.get() != NULL) {
-    this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_metastore_db_uuid", bytes);
-  }
-}
+        // in a bad state, don't commit
+        using ::apache::thrift::protocol::TProtocolException;
+        throw TProtocolException(TProtocolException::INVALID_DATA);
+      }
+      ThriftHiveMetastore_alter_database_presult result;
+      result.read(iprot_);
+      iprot_->readMessageEnd();
+      iprot_->getTransport()->readEnd();
-::boost::shared_ptr< ::apache::thrift::TProcessor > ThriftHiveMetastoreProcessorFactory::getProcessor(const ::apache::thrift::TConnectionInfo& connInfo) {
-  ::apache::thrift::ReleaseHandler< ThriftHiveMetastoreIfFactory > cleanup(handlerFactory_);
-  ::boost::shared_ptr< ThriftHiveMetastoreIf > handler(handlerFactory_->getHandler(connInfo), cleanup);
-  ::boost::shared_ptr< ::apache::thrift::TProcessor > processor(new ThriftHiveMetastoreProcessor(handler));
-  return processor;
+      if (result.__isset.o1) {
+        sentry.commit();
+        throw result.o1;
+      }
+      if (result.__isset.o2) {
+        sentry.commit();
+        throw result.o2;
+      }
+      sentry.commit();
+      return;
+    }
+    // seqid != rseqid
+    this->sync_.updatePending(fname, mtype, rseqid);
+
+    // this will temporarily unlock the readMutex, and let other clients get work done
+    this->sync_.waitForWork(seqid);
+  } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::getMetaConf(std::string& _return, const std::string& key)
+void ThriftHiveMetastoreConcurrentClient::get_type(Type& _return, const std::string& name)
 {
-  int32_t seqid = send_getMetaConf(key);
-  recv_getMetaConf(_return, seqid);
+  int32_t seqid = send_get_type(name);
+  recv_get_type(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_getMetaConf(const std::string& key)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_type(const std::string& name)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("getMetaConf", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_type", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_getMetaConf_pargs args;
-  args.key = &key;
+  ThriftHiveMetastore_get_type_pargs args;
+  args.name = &name;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -59827,7 +62563,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_getMetaConf(const std::string&
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -59856,7 +62592,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return,
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("getMetaConf") != 0) {
+      if (fname.compare("get_type") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -59865,7 +62601,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return,
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_getMetaConf_presult result;
+      ThriftHiveMetastore_get_type_presult result;
       result.success = &_return;
       result.read(iprot_);
       iprot_->readMessageEnd();
@@ -59880,8 +62616,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return,
         sentry.commit();
         throw result.o1;
       }
+      if (result.__isset.o2) {
+        sentry.commit();
+        throw result.o2;
+      }
       // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getMetaConf failed: unknown result");
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type failed: unknown result");
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -59891,21 +62631,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return,
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::setMetaConf(const std::string& key, const std::string& value)
+bool ThriftHiveMetastoreConcurrentClient::create_type(const Type& type)
 {
-  int32_t seqid = send_setMetaConf(key, value);
-  recv_setMetaConf(seqid);
+  int32_t seqid = send_create_type(type);
+  return recv_create_type(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_setMetaConf(const std::string& key, const std::string& value)
+int32_t ThriftHiveMetastoreConcurrentClient::send_create_type(const Type& type)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("setMetaConf", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("create_type", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_setMetaConf_pargs args;
-  args.key = &key;
-  args.value = &value;
+  ThriftHiveMetastore_create_type_pargs args;
+  args.type = &type;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -59916,7 +62655,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_setMetaConf(const std::string&
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid)
+bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -59945,7 +62684,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid)
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("setMetaConf") != 0) {
+      if (fname.compare("create_type") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -59954,17 +62693,31 @@ void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid)
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_setMetaConf_presult result;
+      bool _return;
+      ThriftHiveMetastore_create_type_presult result;
+      result.success = &_return;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();

+      if (result.__isset.success) {
+        sentry.commit();
+        return _return;
+      }
       if (result.__isset.o1) {
         sentry.commit();
         throw result.o1;
       }
-      sentry.commit();
-      return;
+      if (result.__isset.o2) {
+        sentry.commit();
+        throw result.o2;
+      }
+      if (result.__isset.o3) {
+        sentry.commit();
+        throw result.o3;
+      }
+      // in a bad state, don't commit
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_type failed: unknown result");
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -59974,20 +62727,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid)
   } // end while(true)
 }
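For value-returning calls such as create_type, the generated recv_* routine wires a local _return into result.success before decoding; exactly one of __isset.success or an __isset.oN exception flag should come back set, and anything else is surfaced as TApplicationException::MISSING_RESULT without committing the sentry. A hedged caller-side sketch (exception types follow the metastore IDL; client setup as in the earlier example):

    Type stringType;                 // illustrative Type value
    stringType.__set_name("string");
    try {
      bool created = client.create_type(stringType);
      (void)created;
    } catch (const AlreadyExistsException& e) {
      // o1: the type name is already registered
    } catch (const MetaException& e) {
      // server-side failure
    }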

-void ThriftHiveMetastoreConcurrentClient::create_database(const Database& database)
+bool ThriftHiveMetastoreConcurrentClient::drop_type(const std::string& type)
 {
-  int32_t seqid = send_create_database(database);
-  recv_create_database(seqid);
+  int32_t seqid = send_drop_type(type);
+  return recv_drop_type(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_create_database(const Database& database)
+int32_t ThriftHiveMetastoreConcurrentClient::send_drop_type(const std::string& type)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("create_database", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("drop_type", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_create_database_pargs args;
-  args.database = &database;
+  ThriftHiveMetastore_drop_type_pargs args;
+  args.type = &type;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -59998,7 +62751,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_database(const Database
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seqid)
+bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -60027,7 +62780,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seq
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("create_database") != 0) {
+      if (fname.compare("drop_type") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -60036,11 +62789,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seq
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_create_database_presult result;
+      bool _return;
+      ThriftHiveMetastore_drop_type_presult result;
+      result.success = &_return;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();

+      if (result.__isset.success) {
+        sentry.commit();
+        return _return;
+      }
       if (result.__isset.o1) {
         sentry.commit();
         throw result.o1;
@@ -60049,12 +62808,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seq
         sentry.commit();
         throw result.o2;
       }
-      if (result.__isset.o3) {
-        sentry.commit();
-        throw result.o3;
-      }
-      sentry.commit();
-      return;
+      // in a bad state, don't commit
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_type failed: unknown result");
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -60064,19 +62819,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seq
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_database(Database& _return, const std::string& name)
+void ThriftHiveMetastoreConcurrentClient::get_type_all(std::map<std::string, Type> & _return, const std::string& name)
 {
-  int32_t seqid = send_get_database(name);
-  recv_get_database(_return, seqid);
+  int32_t seqid = send_get_type_all(name);
+  recv_get_type_all(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_database(const std::string& name)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_type_all(const std::string& name)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_database", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_type_all", ::apache::thrift::protocol::T_CALL, cseqid);
-  ThriftHiveMetastore_get_database_pargs args;
+  ThriftHiveMetastore_get_type_all_pargs args;
   args.name = &name;
   args.write(oprot_);
@@ -60088,7 +62843,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_database(const std::string
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map<std::string, Type> & _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -60117,7 +62872,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, c
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("get_database") != 0) {
+      if (fname.compare("get_type_all") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -60126,7 +62881,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, c
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_get_database_presult result;
+      ThriftHiveMetastore_get_type_all_presult result;
       result.success = &_return;
       result.read(iprot_);
       iprot_->readMessageEnd();
@@ -60137,16 +62892,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, c
         sentry.commit();
         return;
       }
-      if (result.__isset.o1) {
-        sentry.commit();
-        throw result.o1;
-      }
       if (result.__isset.o2) {
         sentry.commit();
         throw result.o2;
       }
       // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_database failed: unknown result");
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type_all failed: unknown result");
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -60156,22 +62907,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, c
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::drop_database(const std::string& name, const bool deleteData, const bool cascade)
+void ThriftHiveMetastoreConcurrentClient::get_fields(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name)
 {
-  int32_t seqid = send_drop_database(name, deleteData, cascade);
-  recv_drop_database(seqid);
+  int32_t seqid = send_get_fields(db_name, table_name);
+  recv_get_fields(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_drop_database(const std::string& name, const bool deleteData, const bool cascade)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields(const std::string& db_name, const std::string& table_name)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_fields", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_drop_database_pargs args;
-  args.name = &name;
-  args.deleteData = &deleteData;
-  args.cascade = &cascade;
+  ThriftHiveMetastore_get_fields_pargs args;
+  args.db_name = &db_name;
+  args.table_name = &table_name;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -60182,7 +62932,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_database(const std::strin
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector<FieldSchema> & _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -60211,7 +62961,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("drop_database") != 0) {
+      if (fname.compare("get_fields") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -60220,11 +62970,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_drop_database_presult result;
+      ThriftHiveMetastore_get_fields_presult result;
+      result.success = &_return;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();

+      if (result.__isset.success) {
+        // _return pointer has now been filled
+        sentry.commit();
+        return;
+      }
       if (result.__isset.o1) {
         sentry.commit();
         throw result.o1;
@@ -60237,8 +62993,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid
         sentry.commit();
         throw result.o3;
       }
-      sentry.commit();
-      return;
+      // in a bad state, don't commit
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields failed: unknown result");
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -60248,20 +63004,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_databases(std::vector<std::string> & _return, const std::string& pattern)
+void ThriftHiveMetastoreConcurrentClient::get_fields_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
 {
-  int32_t seqid = send_get_databases(pattern);
-  recv_get_databases(_return, seqid);
+  int32_t seqid = send_get_fields_with_environment_context(db_name, table_name, environment_context);
+  recv_get_fields_with_environment_context(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_databases(const std::string& pattern)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_databases", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_fields_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_databases_pargs args;
-  args.pattern = &pattern;
+  ThriftHiveMetastore_get_fields_with_environment_context_pargs args;
+  args.db_name = &db_name;
+  args.table_name = &table_name;
+  args.environment_context = &environment_context;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -60272,7 +63030,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_databases(const std::strin
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vector<std::string> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -60301,7 +63059,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vector<std::string> & _return, const int32_t seqid)
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("get_databases") != 0) {
+      if (fname.compare("get_fields_with_environment_context") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -60310,7 +63068,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vector<std::string> & _return, const int32_t seqid)
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_get_databases_presult result;
+      ThriftHiveMetastore_get_fields_with_environment_context_presult result;
       result.success = &_return;
       result.read(iprot_);
       iprot_->readMessageEnd();
@@ -60325,8 +63083,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vector<std::string> & _return, const int32_t seqid)
         sentry.commit();
         throw result.o1;
       }
+      if (result.__isset.o2) {
+        sentry.commit();
+        throw result.o2;
+      }
+      if (result.__isset.o3) {
+        sentry.commit();
+        throw result.o3;
+      }
       // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_databases failed: unknown result");
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields_with_environment_context failed: unknown result");
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -60336,19 +63102,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vector<std::string> & _return)
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_all_databases(std::vector<std::string> & _return)
+void ThriftHiveMetastoreConcurrentClient::get_schema(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name)
 {
-  int32_t seqid = send_get_all_databases();
-  recv_get_all_databases(_return, seqid);
+  int32_t seqid = send_get_schema(db_name, table_name);
+  recv_get_schema(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_databases()
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema(const std::string& db_name, const std::string& table_name)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_all_databases", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_schema", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_all_databases_pargs args;
+  ThriftHiveMetastore_get_schema_pargs args;
+  args.db_name = &db_name;
+  args.table_name = &table_name;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -60359,7 +63127,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_databases()
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vector<std::string> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector<FieldSchema> & _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -60388,7 +63156,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vector<std::string> & _return, const int32_t seqid)
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("get_all_databases") != 0) {
+      if (fname.compare("get_schema") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -60397,7 +63165,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vector<std::string> & _return, const int32_t seqid)
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_get_all_databases_presult result;
+      ThriftHiveMetastore_get_schema_presult result;
       result.success = &_return;
       result.read(iprot_);
       iprot_->readMessageEnd();
@@ -60412,8 +63180,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vector<std::string> & _return, const int32_t seqid)
         sentry.commit();
         throw result.o1;
       }
+      if (result.__isset.o2) {
+        sentry.commit();
+        throw result.o2;
+      }
+      if (result.__isset.o3) {
+        sentry.commit();
+        throw result.o3;
+      }
       // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_databases failed: unknown result");
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema failed: unknown result");
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -60423,21 +63199,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vector<std::string> & _return)
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::alter_database(const std::string& dbname, const Database& db)
+void ThriftHiveMetastoreConcurrentClient::get_schema_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
 {
-  int32_t seqid = send_alter_database(dbname, db);
-  recv_alter_database(seqid);
+  int32_t seqid = send_get_schema_with_environment_context(db_name, table_name, environment_context);
+  recv_get_schema_with_environment_context(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_alter_database(const std::string& dbname, const Database& db)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("alter_database", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_schema_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_alter_database_pargs args;
-  args.dbname = &dbname;
-  args.db = &db;
+  ThriftHiveMetastore_get_schema_with_environment_context_pargs args;
+  args.db_name = &db_name;
+  args.table_name = &table_name;
+  args.environment_context = &environment_context;
   args.write(oprot_);

@@ -60448,7 +63225,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_database(const std::stri
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -60477,7 +63254,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqi
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("alter_database") != 0) {
+      if (fname.compare("get_schema_with_environment_context") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -60486,11 +63263,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqi
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_alter_database_presult result;
+      ThriftHiveMetastore_get_schema_with_environment_context_presult result;
+      result.success = &_return;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();

+      if (result.__isset.success) {
+        // _return pointer has now been filled
+        sentry.commit();
+        return;
+      }
       if (result.__isset.o1) {
         sentry.commit();
         throw result.o1;
@@ -60499,8 +63282,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqi
         sentry.commit();
         throw result.o2;
       }
-      sentry.commit();
-      return;
+      if (result.__isset.o3) {
+        sentry.commit();
+        throw result.o3;
+      }
+      // in a bad state, don't commit
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema_with_environment_context failed: unknown result");
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -60510,20 +63297,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqi
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_type(Type& _return, const std::string& name)
+void ThriftHiveMetastoreConcurrentClient::create_table(const Table& tbl)
 {
-  int32_t seqid = send_get_type(name);
-  recv_get_type(_return, seqid);
+  int32_t seqid = send_create_table(tbl);
+  recv_create_table(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_type(const std::string& name)
+int32_t ThriftHiveMetastoreConcurrentClient::send_create_table(const Table& tbl)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_type", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("create_table", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_type_pargs args;
-  args.name = &name;
+  ThriftHiveMetastore_create_table_pargs args;
+  args.tbl = &tbl;
   args.write(oprot_);
   oprot_->writeMessageEnd();
@@ -60534,7 +63321,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_type(const std::string& na
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -60563,7 +63350,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("get_type") != 0) {
+      if (fname.compare("create_table") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -60572,17 +63359,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_get_type_presult result;
-      result.success = &_return;
+      ThriftHiveMetastore_create_table_presult result;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();

-      if (result.__isset.success) {
-        // _return pointer has now been filled
-        sentry.commit();
-        return;
-      }
       if (result.__isset.o1) {
         sentry.commit();
         throw result.o1;
@@ -60591,8 +63372,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int
         sentry.commit();
         throw result.o2;
       }
-      // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type failed: unknown result");
+      if (result.__isset.o3) {
+        sentry.commit();
+        throw result.o3;
+      }
+      if (result.__isset.o4) {
+        sentry.commit();
+        throw result.o4;
+      }
+      sentry.commit();
+      return;
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -60602,20 +63391,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int
   } // end while(true)
 }

-bool ThriftHiveMetastoreConcurrentClient::create_type(const Type& type)
+void ThriftHiveMetastoreConcurrentClient::create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context)
 {
-  int32_t seqid = send_create_type(type);
-  return recv_create_type(seqid);
+  int32_t seqid = send_create_table_with_environment_context(tbl, environment_context);
+  recv_create_table_with_environment_context(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_create_type(const Type& type)
+int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("create_type", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("create_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_create_type_pargs args;
-  args.type = &type;
+  ThriftHiveMetastore_create_table_with_environment_context_pargs args;
+  args.tbl = &tbl;
+  args.environment_context = &environment_context;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -60626,7 +63416,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_type(const Type& type)
   return cseqid;
 }

-bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_context(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -60655,7 +63445,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid)
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("create_type") != 0) {
+      if (fname.compare("create_table_with_environment_context") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -60664,17 +63454,11 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid)
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      bool _return;
-      ThriftHiveMetastore_create_type_presult result;
-      result.success = &_return;
+      ThriftHiveMetastore_create_table_with_environment_context_presult result;
      result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();

-      if (result.__isset.success) {
-        sentry.commit();
-        return _return;
-      }
       if (result.__isset.o1) {
         sentry.commit();
         throw result.o1;
@@ -60687,8 +63471,12 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid)
         sentry.commit();
         throw result.o3;
       }
-      // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_type failed: unknown result");
+      if (result.__isset.o4) {
+        sentry.commit();
+        throw result.o4;
+      }
+      sentry.commit();
+      return;
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -60698,20 +63486,24 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid)
   } // end while(true)
 }

-bool ThriftHiveMetastoreConcurrentClient::drop_type(const std::string& type)
+void ThriftHiveMetastoreConcurrentClient::create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints)
 {
-  int32_t seqid = send_drop_type(type);
-  return recv_drop_type(seqid);
+  int32_t seqid = send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints);
+  recv_create_table_with_constraints(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_drop_type(const std::string& type)
+int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("drop_type", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("create_table_with_constraints", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_drop_type_pargs args;
-  args.type = &type;
+  ThriftHiveMetastore_create_table_with_constraints_pargs args;
+  args.tbl = &tbl;
+  args.primaryKeys = &primaryKeys;
+  args.foreignKeys = &foreignKeys;
+  args.uniqueConstraints = &uniqueConstraints;
+  args.notNullConstraints = &notNullConstraints;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -60722,7 +63514,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_type(const std::string& t
   return cseqid;
 }

-bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -60751,7 +63543,7 @@ bool
ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid)
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("drop_type") != 0) {
+      if (fname.compare("create_table_with_constraints") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -60760,17 +63552,11 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid)
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      bool _return;
-      ThriftHiveMetastore_drop_type_presult result;
-      result.success = &_return;
+      ThriftHiveMetastore_create_table_with_constraints_presult result;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();

-      if (result.__isset.success) {
-        sentry.commit();
-        return _return;
-      }
       if (result.__isset.o1) {
         sentry.commit();
         throw result.o1;
@@ -60779,8 +63565,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid)
         sentry.commit();
         throw result.o2;
       }
-      // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_type failed: unknown result");
+      if (result.__isset.o3) {
+        sentry.commit();
+        throw result.o3;
+      }
+      if (result.__isset.o4) {
+        sentry.commit();
+        throw result.o4;
+      }
+      sentry.commit();
+      return;
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -60790,20 +63584,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid)
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_type_all(std::map<std::string, Type> & _return, const std::string& name)
+void ThriftHiveMetastoreConcurrentClient::drop_constraint(const DropConstraintRequest& req)
 {
-  int32_t seqid = send_get_type_all(name);
-  recv_get_type_all(_return, seqid);
+  int32_t seqid = send_drop_constraint(req);
+  recv_drop_constraint(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_type_all(const std::string& name)
+int32_t ThriftHiveMetastoreConcurrentClient::send_drop_constraint(const DropConstraintRequest& req)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_type_all", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("drop_constraint", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_type_all_pargs args;
-  args.name = &name;
+  ThriftHiveMetastore_drop_constraint_pargs args;
+  args.req = &req;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -60814,7 +63608,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_type_all(const std::string
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map<std::string, Type> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -60843,7 +63637,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map<std::string, Type> & _return, const int32_t seqid)
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("get_type_all") != 0) {
+      if (fname.compare("drop_constraint") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
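The newer constraint calls (drop_constraint, add_primary_key, and friends) each take a single request struct rather than a flat parameter list, which is why their send_* bodies populate only args.req. A sketch of driving drop_constraint, with illustrative database, table, and constraint names (field names per the metastore Thrift structs):

    DropConstraintRequest req;
    req.__set_dbname("default");
    req.__set_tablename("web_logs");          // hypothetical table
    req.__set_constraintname("pk_web_logs");  // hypothetical constraint
    try {
      client.drop_constraint(req);
    } catch (const NoSuchObjectException& e) {
      // o1: no such constraint
    } catch (const MetaException& e) {
      // numbered o3 in the IDL, hence the __isset.o3 check in recv_drop_constraint
    }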
@@ -60852,23 +63646,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map<std::string, Type> & _return, const int32_t seqid)
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_get_type_all_presult result;
-      result.success = &_return;
+      ThriftHiveMetastore_drop_constraint_presult result;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();

-      if (result.__isset.success) {
-        // _return pointer has now been filled
+      if (result.__isset.o1) {
         sentry.commit();
-        return;
+        throw result.o1;
       }
-      if (result.__isset.o2) {
+      if (result.__isset.o3) {
         sentry.commit();
-        throw result.o2;
+        throw result.o3;
       }
-      // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type_all failed: unknown result");
+      sentry.commit();
+      return;
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -60878,21 +63670,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map<std::string, Type> & _retur
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_fields(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name)
+void ThriftHiveMetastoreConcurrentClient::add_primary_key(const AddPrimaryKeyRequest& req)
 {
-  int32_t seqid = send_get_fields(db_name, table_name);
-  recv_get_fields(_return, seqid);
+  int32_t seqid = send_add_primary_key(req);
+  recv_add_primary_key(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields(const std::string& db_name, const std::string& table_name)
+int32_t ThriftHiveMetastoreConcurrentClient::send_add_primary_key(const AddPrimaryKeyRequest& req)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_fields", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("add_primary_key", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_fields_pargs args;
-  args.db_name = &db_name;
-  args.table_name = &table_name;
+  ThriftHiveMetastore_add_primary_key_pargs args;
+  args.req = &req;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -60903,7 +63694,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields(const std::string&
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector<FieldSchema> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -60932,7 +63723,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector<FieldSchema> & _return, const int32_t seqid)
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("get_fields") != 0) {
+      if (fname.compare("add_primary_key") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -60941,17 +63732,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector<FieldSchema> & _return, const int32_t seqid)
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_get_fields_presult result;
-      result.success = &_return;
+      ThriftHiveMetastore_add_primary_key_presult result;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();

-      if (result.__isset.success) {
-        // _return pointer has now been filled
-        sentry.commit();
-        return;
-      }
       if (result.__isset.o1) {
         sentry.commit();
         throw result.o1;
@@ -60960,12 +63745,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector<FieldSchema> & _return, const int32_t seqid)
         sentry.commit();
         throw result.o2;
       }
-      if (result.__isset.o3) {
-        sentry.commit();
-        throw result.o3;
-      }
-      // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields failed: unknown result");
+      sentry.commit();
+      return;
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -60975,22 +63756,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector<FieldSchema> & _return, const int32_t seqid)
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_fields_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
+void ThriftHiveMetastoreConcurrentClient::add_foreign_key(const AddForeignKeyRequest& req)
 {
-  int32_t seqid = send_get_fields_with_environment_context(db_name, table_name, environment_context);
-  recv_get_fields_with_environment_context(_return, seqid);
+  int32_t seqid = send_add_foreign_key(req);
+  recv_add_foreign_key(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
+int32_t
 ThriftHiveMetastoreConcurrentClient::send_add_foreign_key(const AddForeignKeyRequest& req)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_fields_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("add_foreign_key", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_fields_with_environment_context_pargs args;
-  args.db_name = &db_name;
-  args.table_name = &table_name;
-  args.environment_context = &environment_context;
+  ThriftHiveMetastore_add_foreign_key_pargs args;
+  args.req = &req;
  args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -61001,7 +63780,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields_with_environment_co
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -61030,7 +63809,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_conte
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("get_fields_with_environment_context") != 0) {
+      if (fname.compare("add_foreign_key") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -61039,17 +63818,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_conte
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_get_fields_with_environment_context_presult result;
-      result.success = &_return;
+      ThriftHiveMetastore_add_foreign_key_presult result;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();

-      if (result.__isset.success) {
-        // _return pointer has now been filled
-        sentry.commit();
-        return;
-      }
       if (result.__isset.o1) {
         sentry.commit();
         throw result.o1;
@@ -61058,12 +63831,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_conte
         sentry.commit();
         throw result.o2;
       }
-      if (result.__isset.o3) {
-        sentry.commit();
-        throw result.o3;
-      }
-      // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields_with_environment_context failed: unknown result");
+      sentry.commit();
+      return;
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -61073,21 +63842,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_conte
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_schema(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name)
+void ThriftHiveMetastoreConcurrentClient::add_unique_constraint(const AddUniqueConstraintRequest& req)
 {
-  int32_t seqid = send_get_schema(db_name, table_name);
-  recv_get_schema(_return, seqid);
+  int32_t seqid = send_add_unique_constraint(req);
+  recv_add_unique_constraint(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema(const std::string& db_name, const std::string& table_name)
+int32_t ThriftHiveMetastoreConcurrentClient::send_add_unique_constraint(const AddUniqueConstraintRequest& req)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_schema",
 ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("add_unique_constraint", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_schema_pargs args;
-  args.db_name = &db_name;
-  args.table_name = &table_name;
+  ThriftHiveMetastore_add_unique_constraint_pargs args;
+  args.req = &req;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -61098,7 +63866,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema(const std::string&
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector<FieldSchema> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -61127,7 +63895,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector<FieldSchema> & _return, const int32_t seqid)
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("get_schema") != 0) {
+      if (fname.compare("add_unique_constraint") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -61136,17 +63904,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector<FieldSchema> & _return, const int32_t seqid)
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_get_schema_presult result;
-      result.success = &_return;
+      ThriftHiveMetastore_add_unique_constraint_presult result;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();

-      if (result.__isset.success) {
-        // _return pointer has now been filled
-        sentry.commit();
-        return;
-      }
       if (result.__isset.o1) {
         sentry.commit();
         throw result.o1;
@@ -61155,12 +63917,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector<FieldSchema> & _return, const int32_t seqid)
         sentry.commit();
         throw result.o2;
       }
-      if (result.__isset.o3) {
-        sentry.commit();
-        throw result.o3;
-      }
-      // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema failed: unknown result");
+      sentry.commit();
+      return;
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -61170,22 +63928,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector<FieldSchema> & _return, const int32_t seqid)
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_schema_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
+void ThriftHiveMetastoreConcurrentClient::add_not_null_constraint(const AddNotNullConstraintRequest& req)
 {
-  int32_t seqid = send_get_schema_with_environment_context(db_name, table_name, environment_context);
-  recv_get_schema_with_environment_context(_return, seqid);
+  int32_t seqid = send_add_not_null_constraint(req);
+  recv_add_not_null_constraint(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
+int32_t ThriftHiveMetastoreConcurrentClient::send_add_not_null_constraint(const AddNotNullConstraintRequest& req)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_schema_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("add_not_null_constraint", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_schema_with_environment_context_pargs args;
-  args.db_name = &db_name;
-  args.table_name = &table_name;
-  args.environment_context = &environment_context;
+  ThriftHiveMetastore_add_not_null_constraint_pargs args;
+  args.req = &req;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -61196,7 +63952,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema_with_environment_co
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_add_not_null_constraint(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -61225,7 +63981,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_conte
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("get_schema_with_environment_context") != 0) {
+      if (fname.compare("add_not_null_constraint") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -61234,17 +63990,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_conte
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_get_schema_with_environment_context_presult result;
-      result.success = &_return;
+      ThriftHiveMetastore_add_not_null_constraint_presult result;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();

-      if (result.__isset.success) {
-        // _return pointer has now been filled
-        sentry.commit();
-        return;
-      }
       if (result.__isset.o1) {
         sentry.commit();
         throw result.o1;
@@ -61253,12 +64003,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_conte
         sentry.commit();
         throw result.o2;
       }
-      if (result.__isset.o3) {
-        sentry.commit();
-        throw result.o3;
-      }
-      // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema_with_environment_context failed: unknown result");
+      sentry.commit();
+      return;
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -61268,20 +64014,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_conte
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::create_table(const Table& tbl)
+void ThriftHiveMetastoreConcurrentClient::drop_table(const std::string& dbname, const std::string& name, const bool deleteData)
 {
-  int32_t seqid = send_create_table(tbl);
-  recv_create_table(seqid);
+  int32_t seqid = send_drop_table(dbname, name, deleteData);
+  recv_drop_table(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_create_table(const Table& tbl)
+int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("create_table", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("drop_table", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_create_table_pargs args;
-  args.tbl = &tbl;
+  ThriftHiveMetastore_drop_table_pargs args;
+  args.dbname = &dbname;
+  args.name = &name;
+  args.deleteData = &deleteData;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -61292,7 +64040,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_table(const Table& tbl)
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_drop_table(const int32_t seqid)
 {

   int32_t rseqid = 0;
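drop_table carries a deleteData flag telling the server whether to remove the underlying files along with the metadata; its recv_ routine (next hunks) checks __isset.o1 and __isset.o3 because the IDL numbers drop_table's exceptions as o1 (NoSuchObjectException) and o3 (MetaException). A brief usage sketch with illustrative names:

    // true: also delete the table's data directory (non-EXTERNAL tables)
    client.drop_table("default", "web_logs", true);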
@@ -61321,7 +64069,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid)
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("create_table") != 0) {
+      if (fname.compare("drop_table") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -61330,7 +64078,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid)
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_create_table_presult result;
+      ThriftHiveMetastore_drop_table_presult result;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -61339,18 +64087,10 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid)
         sentry.commit();
         throw result.o1;
       }
-      if (result.__isset.o2) {
-        sentry.commit();
-        throw result.o2;
-      }
       if (result.__isset.o3) {
         sentry.commit();
         throw result.o3;
       }
-      if (result.__isset.o4) {
-        sentry.commit();
-        throw result.o4;
-      }
       sentry.commit();
       return;
     }
@@ -61362,20 +64102,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid)
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context)
+void ThriftHiveMetastoreConcurrentClient::drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context)
 {
-  int32_t seqid = send_create_table_with_environment_context(tbl, environment_context);
-  recv_create_table_with_environment_context(seqid);
+  int32_t seqid = send_drop_table_with_environment_context(dbname, name, deleteData, environment_context);
+  recv_drop_table_with_environment_context(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context)
+int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("create_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("drop_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_create_table_with_environment_context_pargs args;
-  args.tbl = &tbl;
+  ThriftHiveMetastore_drop_table_with_environment_context_pargs args;
+  args.dbname = &dbname;
+  args.name = &name;
+  args.deleteData = &deleteData;
   args.environment_context = &environment_context;
   args.write(oprot_);

@@ -61387,7 +64129,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_environment_
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_context(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_drop_table_with_environment_context(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -61416,7 +64158,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_con
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("create_table_with_environment_context") != 0) {
+      if (fname.compare("drop_table_with_environment_context") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -61425,7 +64167,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_con
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_create_table_with_environment_context_presult result;
+      ThriftHiveMetastore_drop_table_with_environment_context_presult result;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -61434,18 +64176,10 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_con
         sentry.commit();
         throw result.o1;
       }
-      if (result.__isset.o2) {
-        sentry.commit();
-        throw result.o2;
-      }
       if (result.__isset.o3) {
         sentry.commit();
         throw result.o3;
       }
-      if (result.__isset.o4) {
-        sentry.commit();
-        throw result.o4;
-      }
       sentry.commit();
       return;
     }
@@ -61457,24 +64191,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_con
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints)
+void ThriftHiveMetastoreConcurrentClient::truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames)
 {
-  int32_t seqid = send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints);
-  recv_create_table_with_constraints(seqid);
+  int32_t seqid = send_truncate_table(dbName, tableName, partNames);
+  recv_truncate_table(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints)
+int32_t ThriftHiveMetastoreConcurrentClient::send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("create_table_with_constraints", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("truncate_table", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_create_table_with_constraints_pargs args;
-  args.tbl = &tbl;
-  args.primaryKeys = &primaryKeys;
-  args.foreignKeys = &foreignKeys;
-  args.uniqueConstraints = &uniqueConstraints;
-  args.notNullConstraints = &notNullConstraints;
+  ThriftHiveMetastore_truncate_table_pargs args;
+  args.dbName = &dbName;
+  args.tableName = &tableName;
+  args.partNames = &partNames;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -61485,7 +64217,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_constraints(
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_truncate_table(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -61514,7 +64246,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(con
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("create_table_with_constraints") != 0) {
+      if (fname.compare("truncate_table") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -61523,7 +64255,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(con
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_create_table_with_constraints_presult result;
+      ThriftHiveMetastore_truncate_table_presult result;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(con sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } sentry.commit(); return; } @@ -61555,20 +64275,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(con } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_constraint(const DropConstraintRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern) { - int32_t seqid = send_drop_constraint(req); - recv_drop_constraint(seqid); + int32_t seqid = send_get_tables(db_name, pattern); + recv_get_tables(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_constraint(const DropConstraintRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables(const std::string& db_name, const std::string& pattern) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_constraint", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_tables", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_constraint_pargs args; - args.req = &req; + ThriftHiveMetastore_get_tables_pargs args; + args.db_name = &db_name; + args.pattern = &pattern; args.write(oprot_); oprot_->writeMessageEnd(); @@ -61579,7 +64300,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_constraint(const DropCons return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -61608,7 +64329,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_constraint") != 0) { + if (fname.compare("get_tables") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61617,21 +64338,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_constraint_presult result; + ThriftHiveMetastore_get_tables_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o3) { + if (result.__isset.o1) { sentry.commit(); - throw result.o3; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_tables failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -61641,20 +64364,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seq } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_primary_key(const AddPrimaryKeyRequest& req) +void 
ThriftHiveMetastoreConcurrentClient::get_tables_by_type(std::vector & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType) { - int32_t seqid = send_add_primary_key(req); - recv_add_primary_key(seqid); + int32_t seqid = send_get_tables_by_type(db_name, pattern, tableType); + recv_get_tables_by_type(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_primary_key(const AddPrimaryKeyRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_primary_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_tables_by_type", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_primary_key_pargs args; - args.req = &req; + ThriftHiveMetastore_get_tables_by_type_pargs args; + args.db_name = &db_name; + args.pattern = &pattern; + args.tableType = &tableType; args.write(oprot_); oprot_->writeMessageEnd(); @@ -61665,7 +64390,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_primary_key(const AddPrima return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -61694,7 +64419,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_primary_key") != 0) { + if (fname.compare("get_tables_by_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61703,21 +64428,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_primary_key_presult result; + ThriftHiveMetastore_get_tables_by_type_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o2) { + if (result.__isset.o1) { sentry.commit(); - throw result.o2; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_tables_by_type failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -61727,20 +64454,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seq } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_foreign_key(const AddForeignKeyRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_table_meta(std::vector & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types) { - int32_t seqid = send_add_foreign_key(req); - recv_add_foreign_key(seqid); + int32_t seqid = send_get_table_meta(db_patterns, tbl_patterns, tbl_types); + recv_get_table_meta(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_foreign_key(const 
AddForeignKeyRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_foreign_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_meta", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_foreign_key_pargs args; - args.req = &req; + ThriftHiveMetastore_get_table_meta_pargs args; + args.db_patterns = &db_patterns; + args.tbl_patterns = &tbl_patterns; + args.tbl_types = &tbl_types; args.write(oprot_); oprot_->writeMessageEnd(); @@ -61751,7 +64480,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_foreign_key(const AddForei return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_meta(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -61780,7 +64509,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_foreign_key") != 0) { + if (fname.compare("get_table_meta") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61789,21 +64518,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_foreign_key_presult result; + ThriftHiveMetastore_get_table_meta_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o2) { + if (result.__isset.o1) { sentry.commit(); - throw result.o2; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_meta failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -61813,20 +64544,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seq } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_unique_constraint(const AddUniqueConstraintRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_all_tables(std::vector & _return, const std::string& db_name) { - int32_t seqid = send_add_unique_constraint(req); - recv_add_unique_constraint(seqid); + int32_t seqid = send_get_all_tables(db_name); + recv_get_all_tables(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_unique_constraint(const AddUniqueConstraintRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_tables(const std::string& db_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_unique_constraint", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_tables", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_unique_constraint_pargs args; - args.req = 
&req; + ThriftHiveMetastore_get_all_tables_pargs args; + args.db_name = &db_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -61837,7 +64568,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_unique_constraint(const Ad return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -61866,7 +64597,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32 iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_unique_constraint") != 0) { + if (fname.compare("get_all_tables") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61875,21 +64606,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32 using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_unique_constraint_presult result; + ThriftHiveMetastore_get_all_tables_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o2) { + if (result.__isset.o1) { sentry.commit(); - throw result.o2; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_tables failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -61899,20 +64632,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32 } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_not_null_constraint(const AddNotNullConstraintRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_table(Table& _return, const std::string& dbname, const std::string& tbl_name) { - int32_t seqid = send_add_not_null_constraint(req); - recv_add_not_null_constraint(seqid); + int32_t seqid = send_get_table(dbname, tbl_name); + recv_get_table(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_not_null_constraint(const AddNotNullConstraintRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table(const std::string& dbname, const std::string& tbl_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_not_null_constraint", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_not_null_constraint_pargs args; - args.req = &req; + ThriftHiveMetastore_get_table_pargs args; + args.dbname = &dbname; + args.tbl_name = &tbl_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -61923,7 +64657,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_not_null_constraint(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_not_null_constraint(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -61952,7 +64686,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_add_not_null_constraint(const int iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_not_null_constraint") != 0) { + if (fname.compare("get_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61961,11 +64695,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_not_null_constraint(const int using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_not_null_constraint_presult result; + ThriftHiveMetastore_get_table_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -61974,8 +64714,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_not_null_constraint(const int sentry.commit(); throw result.o2; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -61985,22 +64725,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_not_null_constraint(const int } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_table(const std::string& dbname, const std::string& name, const bool deleteData) +void ThriftHiveMetastoreConcurrentClient::get_table_objects_by_name(std::vector & _return, const std::string& dbname, const std::vector & tbl_names) { - int32_t seqid = send_drop_table(dbname, name, deleteData); - recv_drop_table(seqid); + int32_t seqid = send_get_table_objects_by_name(dbname, tbl_names); + recv_get_table_objects_by_name(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_objects_by_name(const std::string& dbname, const std::vector & tbl_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_objects_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_table_pargs args; + ThriftHiveMetastore_get_table_objects_by_name_pargs args; args.dbname = &dbname; - args.name = &name; - args.deleteData = &deleteData; + args.tbl_names = &tbl_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62011,7 +64750,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_table(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::vector
& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -62040,7 +64779,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_table") != 0) { + if (fname.compare("get_table_objects_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62049,21 +64788,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_table_presult result; + ThriftHiveMetastore_get_table_objects_by_name_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o3) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o3; + return; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_objects_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62073,23 +64810,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::get_table_req(GetTableResult& _return, const GetTableRequest& req) { - int32_t seqid = send_drop_table_with_environment_context(dbname, name, deleteData, environment_context); - recv_drop_table_with_environment_context(seqid); + int32_t seqid = send_get_table_req(req); + recv_get_table_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_req(const GetTableRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_table_with_environment_context_pargs args; - args.dbname = &dbname; - args.name = &name; - args.deleteData = &deleteData; - args.environment_context = &environment_context; + ThriftHiveMetastore_get_table_req_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62100,7 +64834,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table_with_environment_co return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_table_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_req(GetTableResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -62129,7 +64863,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table_with_environment_conte iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if 
(fname.compare("drop_table_with_environment_context") != 0) { + if (fname.compare("get_table_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62138,21 +64872,27 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table_with_environment_conte using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_table_with_environment_context_presult result; + ThriftHiveMetastore_get_table_req_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o3) { + if (result.__isset.o2) { sentry.commit(); - throw result.o3; + throw result.o2; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_req failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62162,22 +64902,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table_with_environment_conte } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames) +void ThriftHiveMetastoreConcurrentClient::get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req) { - int32_t seqid = send_truncate_table(dbName, tableName, partNames); - recv_truncate_table(seqid); + int32_t seqid = send_get_table_objects_by_name_req(req); + recv_get_table_objects_by_name_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_objects_by_name_req(const GetTablesRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("truncate_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_objects_by_name_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_truncate_table_pargs args; - args.dbName = &dbName; - args.tableName = &tableName; - args.partNames = &partNames; + ThriftHiveMetastore_get_table_objects_by_name_req_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62188,7 +64926,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_truncate_table(const std::stri return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_truncate_table(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name_req(GetTablesResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -62217,7 +64955,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_truncate_table(const int32_t seqi iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("truncate_table") != 0) { + if (fname.compare("get_table_objects_by_name_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62226,17 +64964,31 @@ void ThriftHiveMetastoreConcurrentClient::recv_truncate_table(const int32_t seqi using 
::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_truncate_table_presult result; + ThriftHiveMetastore_get_table_objects_by_name_req_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - sentry.commit(); - return; + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_objects_by_name_req failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62246,21 +64998,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_truncate_table(const int32_t seqi } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern) +void ThriftHiveMetastoreConcurrentClient::get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) { - int32_t seqid = send_get_tables(db_name, pattern); - recv_get_tables(_return, seqid); + int32_t seqid = send_get_table_names_by_filter(dbname, filter, max_tables); + recv_get_table_names_by_filter(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables(const std::string& db_name, const std::string& pattern) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_tables", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_names_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_tables_pargs args; - args.db_name = &db_name; - args.pattern = &pattern; + ThriftHiveMetastore_get_table_names_by_filter_pargs args; + args.dbname = &dbname; + args.filter = &filter; + args.max_tables = &max_tables; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62271,7 +65024,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -62300,7 +65053,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_tables") != 0) { + if (fname.compare("get_table_names_by_filter") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62309,7 +65062,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vectorreadMessageEnd(); @@ -62324,8 +65077,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -62335,22 +65096,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vector & _return, const 
std::string& db_name, const std::string& pattern, const std::string& tableType) +void ThriftHiveMetastoreConcurrentClient::alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) { - int32_t seqid = send_get_tables_by_type(db_name, pattern, tableType); - recv_get_tables_by_type(_return, seqid); + int32_t seqid = send_alter_table(dbname, tbl_name, new_tbl); + recv_alter_table(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_tables_by_type", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_tables_by_type_pargs args; - args.db_name = &db_name; - args.pattern = &pattern; - args.tableType = &tableType; + ThriftHiveMetastore_alter_table_pargs args; + args.dbname = &dbname; + args.tbl_name = &tbl_name; + args.new_tbl = &new_tbl; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62361,7 +65122,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables_by_type(const std:: return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) { int32_t rseqid = 0; @@ -62390,7 +65151,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_tables_by_type") != 0) { + if (fname.compare("alter_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62399,23 +65160,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_tables_by_type failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62425,22 +65184,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vector & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types) +void ThriftHiveMetastoreConcurrentClient::alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) { - int32_t seqid = send_get_table_meta(db_patterns, tbl_patterns, tbl_types); - recv_get_table_meta(_return, seqid); + int32_t seqid = send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context); + recv_alter_table_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & 
tbl_types) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_meta", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_meta_pargs args; - args.db_patterns = &db_patterns; - args.tbl_patterns = &tbl_patterns; - args.tbl_types = &tbl_types; + ThriftHiveMetastore_alter_table_with_environment_context_pargs args; + args.dbname = &dbname; + args.tbl_name = &tbl_name; + args.new_tbl = &new_tbl; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62451,7 +65211,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_meta(const std::stri return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_meta(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -62480,7 +65240,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_meta(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_meta") != 0) { + if (fname.compare("alter_table_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62489,23 +65249,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_meta(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_meta failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62515,20 +65273,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_meta(std::vector & _return, const std::string& db_name) +void ThriftHiveMetastoreConcurrentClient::alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) { - int32_t seqid = send_get_all_tables(db_name); - recv_get_all_tables(_return, seqid); + int32_t seqid = send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade); + recv_alter_table_with_cascade(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_tables(const std::string& db_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_all_tables", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_table_with_cascade", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_tables_pargs args; - args.db_name = &db_name; + 
ThriftHiveMetastore_alter_table_with_cascade_pargs args; + args.dbname = &dbname; + args.tbl_name = &tbl_name; + args.new_tbl = &new_tbl; + args.cascade = &cascade; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62539,7 +65300,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_tables(const std::stri return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const int32_t seqid) { int32_t rseqid = 0; @@ -62568,7 +65329,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_tables") != 0) { + if (fname.compare("alter_table_with_cascade") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62577,23 +65338,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_tables failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62603,21 +65362,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_pargs args; - args.dbname = &dbname; - args.tbl_name = &tbl_name; + ThriftHiveMetastore_add_partition_pargs args; + args.new_part = &new_part; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62628,7 +65386,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table(const std::string& d return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -62657,7 +65415,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const i iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table") != 0) { + if (fname.compare("add_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62666,7 +65424,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const i using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_presult result; + ThriftHiveMetastore_add_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -62685,8 +65443,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const i sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } // in a bad state, don't commit - throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partition failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62696,21 +65458,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const i } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_objects_by_name(std::vector
& _return, const std::string& dbname, const std::vector & tbl_names) +void ThriftHiveMetastoreConcurrentClient::add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context) { - int32_t seqid = send_get_table_objects_by_name(dbname, tbl_names); - recv_get_table_objects_by_name(_return, seqid); + int32_t seqid = send_add_partition_with_environment_context(new_part, environment_context); + recv_add_partition_with_environment_context(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_objects_by_name(const std::string& dbname, const std::vector & tbl_names) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition_with_environment_context(const Partition& new_part, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_objects_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_objects_by_name_pargs args; - args.dbname = &dbname; - args.tbl_names = &tbl_names; + ThriftHiveMetastore_add_partition_with_environment_context_pargs args; + args.new_part = &new_part; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62721,7 +65483,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_objects_by_name(cons return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::vector
& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_context(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -62750,7 +65512,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::ve iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_objects_by_name") != 0) { + if (fname.compare("add_partition_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62759,7 +65521,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::ve using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_objects_by_name_presult result; + ThriftHiveMetastore_add_partition_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -62770,8 +65532,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::ve sentry.commit(); return; } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_objects_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partition_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62781,20 +65555,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::ve } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_req(GetTableResult& _return, const GetTableRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::add_partitions(const std::vector & new_parts) { - int32_t seqid = send_get_table_req(req); - recv_get_table_req(_return, seqid); + int32_t seqid = send_add_partitions(new_parts); + return recv_add_partitions(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_req(const GetTableRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions(const std::vector & new_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_req_pargs args; - args.req = &req; + ThriftHiveMetastore_add_partitions_pargs args; + args.new_parts = &new_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62805,7 +65579,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_req(const GetTableRe return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_req(GetTableResult& _return, const int32_t seqid) +int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions(const int32_t seqid) { int32_t rseqid = 0; @@ -62834,7 +65608,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_req(GetTableResult& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_req") != 0) { + if 
(fname.compare("add_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62843,16 +65617,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_req(GetTableResult& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_req_presult result; + int32_t _return; + ThriftHiveMetastore_add_partitions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -62862,8 +65636,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_req(GetTableResult& _re sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_req failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62873,20 +65651,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_req(GetTableResult& _re } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::add_partitions_pspec(const std::vector & new_parts) { - int32_t seqid = send_get_table_objects_by_name_req(req); - recv_get_table_objects_by_name_req(_return, seqid); + int32_t seqid = send_add_partitions_pspec(new_parts); + return recv_add_partitions_pspec(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_objects_by_name_req(const GetTablesRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_pspec(const std::vector & new_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_objects_by_name_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_objects_by_name_req_pargs args; - args.req = &req; + ThriftHiveMetastore_add_partitions_pspec_pargs args; + args.new_parts = &new_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62897,7 +65675,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_objects_by_name_req( return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name_req(GetTablesResult& _return, const int32_t seqid) +int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int32_t seqid) { int32_t rseqid = 0; @@ -62926,7 +65704,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name_req(Get iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_objects_by_name_req") != 0) { + if (fname.compare("add_partitions_pspec") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62935,16 +65713,16 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name_req(Get using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_objects_by_name_req_presult result; + int32_t _return; + ThriftHiveMetastore_add_partitions_pspec_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -62959,7 +65737,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name_req(Get throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_objects_by_name_req failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions_pspec failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62969,22 +65747,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name_req(Get } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) +void ThriftHiveMetastoreConcurrentClient::append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) { - int32_t seqid = send_get_table_names_by_filter(dbname, filter, max_tables); - recv_get_table_names_by_filter(_return, seqid); + int32_t seqid = send_append_partition(db_name, tbl_name, part_vals); + recv_append_partition(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables) +int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_names_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_names_by_filter_pargs args; - args.dbname = &dbname; - args.filter = &filter; - args.max_tables = &max_tables; + ThriftHiveMetastore_append_partition_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62995,7 +65773,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_names_by_filter(cons return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -63024,7 +65802,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::ve iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_names_by_filter") != 0) { + if (fname.compare("append_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); 
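// Note the two return conventions in the regenerated bodies: struct- and
// container-valued calls (get_table, append_partition, ...) deserialize
// straight into a caller-supplied _return reference, while the count-returning
// calls (add_partitions, add_partitions_pspec) declare a local int32_t
// _return, point result.success at it, and hand the count back by value once
// __isset.success is observed.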
iprot_->getTransport()->readEnd(); @@ -63033,7 +65811,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::ve using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_names_by_filter_presult result; + ThriftHiveMetastore_append_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -63057,7 +65835,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::ve throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_names_by_filter failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63067,22 +65845,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::ve } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) +void ThriftHiveMetastoreConcurrentClient::add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request) { - int32_t seqid = send_alter_table(dbname, tbl_name, new_tbl); - recv_alter_table(seqid); + int32_t seqid = send_add_partitions_req(request); + recv_add_partitions_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_req(const AddPartitionsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_table_pargs args; - args.dbname = &dbname; - args.tbl_name = &tbl_name; - args.new_tbl = &new_tbl; + ThriftHiveMetastore_add_partitions_req_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63093,7 +65869,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -63122,7 +65898,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_table") != 0) { + if (fname.compare("add_partitions_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63131,11 +65907,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_table_presult result; + ThriftHiveMetastore_add_partitions_req_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + 
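// (Dispatch order matches the other value-returning calls: check
// __isset.success first, then each declared exception slot in turn; only if
// none is set does control fall through to MISSING_RESULT, deliberately
// without committing the sentry, because an empty presult means the stream
// can no longer be trusted.)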
// _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -63144,8 +65926,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) sentry.commit(); throw result.o2; } - sentry.commit(); - return; + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions_req failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63155,22 +65941,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context) { - int32_t seqid = send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context); - recv_alter_table_with_environment_context(seqid); + int32_t seqid = send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context); + recv_append_partition_with_environment_context(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_table_with_environment_context_pargs args; - args.dbname = &dbname; + ThriftHiveMetastore_append_partition_with_environment_context_pargs args; + args.db_name = &db_name; args.tbl_name = &tbl_name; - args.new_tbl = &new_tbl; + args.part_vals = &part_vals; args.environment_context = &environment_context; args.write(oprot_); @@ -63182,7 +65968,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_environment_c return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment_context(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -63211,7 +65997,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_cont iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_table_with_environment_context") != 0) { + if (fname.compare("append_partition_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63220,11 +66006,17 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_cont using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_table_with_environment_context_presult result; + ThriftHiveMetastore_append_partition_with_environment_context_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -63233,8 +66025,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_cont sentry.commit(); throw result.o2; } - sentry.commit(); - return; + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63244,23 +66040,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_cont } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) +void ThriftHiveMetastoreConcurrentClient::append_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) { - int32_t seqid = send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade); - recv_alter_table_with_cascade(seqid); + int32_t seqid = send_append_partition_by_name(db_name, tbl_name, part_name); + recv_append_partition_by_name(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) +int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_table_with_cascade", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_table_with_cascade_pargs args; - args.dbname = &dbname; + ThriftHiveMetastore_append_partition_by_name_pargs args; + args.db_name = &db_name; args.tbl_name = &tbl_name; - args.new_tbl = &new_tbl; - args.cascade = &cascade; + args.part_name = &part_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63271,7 +66066,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_cascade(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -63300,7 +66095,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const in iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_table_with_cascade") != 0) { + if (fname.compare("append_partition_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); 
iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63309,11 +66104,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const in using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_table_with_cascade_presult result; + ThriftHiveMetastore_append_partition_by_name_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -63322,8 +66123,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const in sentry.commit(); throw result.o2; } - sentry.commit(); - return; + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63333,20 +66138,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const in } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_partition(Partition& _return, const Partition& new_part) +void ThriftHiveMetastoreConcurrentClient::append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) { - int32_t seqid = send_add_partition(new_part); - recv_add_partition(_return, seqid); + int32_t seqid = send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context); + recv_append_partition_by_name_with_environment_context(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition(const Partition& new_part) +int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partition_pargs args; - args.new_part = &new_part; + ThriftHiveMetastore_append_partition_by_name_with_environment_context_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_name = &part_name; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63357,7 +66165,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition(const Partition& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_environment_context(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -63386,7 +66194,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partition") 
!= 0) { + if (fname.compare("append_partition_by_name_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63395,7 +66203,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_partition_presult result; + ThriftHiveMetastore_append_partition_by_name_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -63419,7 +66227,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partition failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63429,21 +66237,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context) +bool ThriftHiveMetastoreConcurrentClient::drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData) { - int32_t seqid = send_add_partition_with_environment_context(new_part, environment_context); - recv_add_partition_with_environment_context(_return, seqid); + int32_t seqid = send_drop_partition(db_name, tbl_name, part_vals, deleteData); + return recv_drop_partition(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition_with_environment_context(const Partition& new_part, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partition_with_environment_context_pargs args; - args.new_part = &new_part; - args.environment_context = &environment_context; + ThriftHiveMetastore_drop_partition_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63454,7 +66264,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition_with_environment return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_context(Partition& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqid) { int32_t rseqid = 0; @@ -63483,7 +66293,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_co iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if 
(fname.compare("add_partition_with_environment_context") != 0) { + if (fname.compare("drop_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63492,16 +66302,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_co using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_partition_with_environment_context_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -63511,12 +66321,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_co sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partition_with_environment_context failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63526,20 +66332,24 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_co } // end while(true) } -int32_t ThriftHiveMetastoreConcurrentClient::add_partitions(const std::vector<Partition> & new_parts) +bool ThriftHiveMetastoreConcurrentClient::drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData, const EnvironmentContext& environment_context) { - int32_t seqid = send_add_partitions(new_parts); - return recv_add_partitions(seqid); + int32_t seqid = send_drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, environment_context); + return recv_drop_partition_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions(const std::vector<Partition> & new_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partitions_pargs args; - args.new_parts = &new_parts; + ThriftHiveMetastore_drop_partition_with_environment_context_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.deleteData = &deleteData; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63550,7 +66360,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions(const std::vect return cseqid; } -int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions(const int32_t seqid) +bool 
ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -63579,7 +66389,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partitions") != 0) { + if (fname.compare("drop_partition_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63588,8 +66398,8 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions(const int32_t s using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - int32_t _return; - ThriftHiveMetastore_add_partitions_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -63607,12 +66417,8 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions(const int32_t s sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63622,20 +66428,23 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions(const int32_t s } // end while(true) } -int32_t ThriftHiveMetastoreConcurrentClient::add_partitions_pspec(const std::vector<PartitionSpec> & new_parts) +bool ThriftHiveMetastoreConcurrentClient::drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) { - int32_t seqid = send_add_partitions_pspec(new_parts); - return recv_add_partitions_pspec(seqid); + int32_t seqid = send_drop_partition_by_name(db_name, tbl_name, part_name, deleteData); + return recv_drop_partition_by_name(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_pspec(const std::vector<PartitionSpec> & new_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partitions_pspec_pargs args; - args.new_parts = &new_parts; + ThriftHiveMetastore_drop_partition_by_name_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_name = &part_name; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63646,7 +66455,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_pspec(const std return cseqid; } -int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int32_t seqid) { int32_t rseqid = 0; @@ -63675,7 +66484,7 @@ int32_t 
ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partitions_pspec") != 0) { + if (fname.compare("drop_partition_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63684,8 +66493,8 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - int32_t _return; - ThriftHiveMetastore_add_partitions_pspec_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -63703,12 +66512,8 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions_pspec failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63718,22 +66523,24 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals) +bool ThriftHiveMetastoreConcurrentClient::drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) { - int32_t seqid = send_append_partition(db_name, tbl_name, part_vals); - recv_append_partition(_return, seqid); + int32_t seqid = send_drop_partition_by_name_with_environment_context(db_name, tbl_name, part_name, deleteData, environment_context); + return recv_drop_partition_by_name_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("append_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_pargs args; + ThriftHiveMetastore_drop_partition_by_name_with_environment_context_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; + args.part_name = &part_name; + args.deleteData = &deleteData; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63744,7 +66551,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition(const std::st return cseqid; } -void 
ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -63773,7 +66580,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _retu iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition") != 0) { + if (fname.compare("drop_partition_by_name_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63782,16 +66589,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _retu using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_append_partition_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_by_name_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -63801,12 +66608,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _retu sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_by_name_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63816,20 +66619,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _retu } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request) +void ThriftHiveMetastoreConcurrentClient::drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req) { - int32_t seqid = send_add_partitions_req(request); - recv_add_partitions_req(_return, seqid); + int32_t seqid = send_drop_partitions_req(req); + recv_drop_partitions_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_req(const AddPartitionsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partitions_req(const DropPartitionsRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partitions_req_pargs args; - args.request = &request; + ThriftHiveMetastore_drop_partitions_req_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63840,7 +66643,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_req(const AddPa return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsResult& _return, const int32_t seqid) +void 
ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartitionsResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -63869,7 +66672,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsR iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partitions_req") != 0) { + if (fname.compare("drop_partitions_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63878,7 +66681,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsR using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_partitions_req_presult result; + ThriftHiveMetastore_drop_partitions_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -63897,12 +66700,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsR sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions_req failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partitions_req failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63912,23 +66711,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsR } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals) { - int32_t seqid = send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context); - recv_append_partition_with_environment_context(_return, seqid); + int32_t seqid = send_get_partition(db_name, tbl_name, part_vals); + recv_get_partition(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("append_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_with_environment_context_pargs args; + ThriftHiveMetastore_get_partition_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; args.part_vals = &part_vals; - args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63939,7 +66737,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_with_environm return cseqid; } -void 
ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment_context(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -63968,7 +66766,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition_with_environment_context") != 0) { + if (fname.compare("get_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63977,7 +66775,103 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_append_partition_with_environment_context_presult result; + ThriftHiveMetastore_get_partition_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::exchange_partition(Partition& _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) +{ + int32_t seqid = send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); + recv_exchange_partition(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partition(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("exchange_partition", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_exchange_partition_pargs args; + args.partitionSpecs = &partitionSpecs; + args.source_db = &source_db; + args.source_table_name = &source_table_name; + args.dest_db = &dest_db; + args.dest_table_name = &dest_table_name; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + 
iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("exchange_partition") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_exchange_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -64000,8 +66894,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment sentry.commit(); throw result.o3; } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_with_environment_context failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "exchange_partition failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64011,22 +66909,24 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::append_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) +void ThriftHiveMetastoreConcurrentClient::exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) { - int32_t seqid = send_append_partition_by_name(db_name, tbl_name, part_name); - recv_append_partition_by_name(_return, seqid); + int32_t seqid = send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); + recv_exchange_partitions(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partitions(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("append_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("exchange_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_by_name_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_name = &part_name; + ThriftHiveMetastore_exchange_partitions_pargs args; + args.partitionSpecs = &partitionSpecs; + args.source_db = &source_db; + args.source_table_name = &source_table_name; + args.dest_db = &dest_db; + args.dest_table_name = &dest_table_name; args.write(oprot_); 
oprot_->writeMessageEnd(); @@ -64037,7 +66937,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector<Partition> & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -64066,7 +66966,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partitio iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition_by_name") != 0) { + if (fname.compare("exchange_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64075,7 +66975,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partitio using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_append_partition_by_name_presult result; + ThriftHiveMetastore_exchange_partitions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -64098,8 +66998,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partitio sentry.commit(); throw result.o3; } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "exchange_partitions failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64109,23 +67013,24 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partitio } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names) { - int32_t seqid = send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context); - recv_append_partition_by_name_with_environment_context(_return, seqid); + int32_t seqid = send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names); + recv_get_partition_with_auth(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("append_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_with_auth", 
::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_by_name_with_environment_context_pargs args; + ThriftHiveMetastore_get_partition_with_auth_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.environment_context = &environment_context; + args.part_vals = &part_vals; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64136,7 +67041,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name_with_ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_environment_context(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -64165,7 +67070,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_env iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition_by_name_with_environment_context") != 0) { + if (fname.compare("get_partition_with_auth") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64174,7 +67079,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_env using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_append_partition_by_name_with_environment_context_presult result; + ThriftHiveMetastore_get_partition_with_auth_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -64193,12 +67098,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_env sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name_with_environment_context failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_with_auth failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64208,23 +67109,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_env } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData) +void ThriftHiveMetastoreConcurrentClient::get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) { - int32_t seqid = send_drop_partition(db_name, tbl_name, part_vals, deleteData); - return recv_drop_partition(seqid); + int32_t seqid = send_get_partition_by_name(db_name, tbl_name, part_name); + recv_get_partition_by_name(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - 
oprot_->writeMessageBegin("drop_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_pargs args; + ThriftHiveMetastore_get_partition_by_name_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.deleteData = &deleteData; + args.part_name = &part_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64235,7 +67135,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition(const std::stri return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -64264,7 +67164,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqi iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partition") != 0) { + if (fname.compare("get_partition_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64273,16 +67173,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqi using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_partition_presult result; + ThriftHiveMetastore_get_partition_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -64293,7 +67193,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqi throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64303,24 +67203,22 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqi } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::get_partitions(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) { - int32_t seqid = send_drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, environment_context); - return recv_drop_partition_with_environment_context(seqid); + int32_t seqid = send_get_partitions(db_name, tbl_name, max_parts); + recv_get_partitions(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions(const std::string& db_name, const std::string& 
tbl_name, const int16_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_with_environment_context_pargs args; + ThriftHiveMetastore_get_partitions_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.deleteData = &deleteData; - args.environment_context = &environment_context; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64331,7 +67229,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_with_environmen return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vector<Partition> & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -64360,7 +67258,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_c iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partition_with_environment_context") != 0) { + if (fname.compare("get_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64369,16 +67267,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_c using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_partition_with_environment_context_presult result; + ThriftHiveMetastore_get_partitions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -64389,7 +67287,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_c throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_with_environment_context failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64399,23 +67297,24 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_c } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) +void ThriftHiveMetastoreConcurrentClient::get_partitions_with_auth(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names) { - int32_t seqid = send_drop_partition_by_name(db_name, tbl_name, part_name, deleteData); - return recv_drop_partition_by_name(seqid); + int32_t seqid = send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names); + recv_get_partitions_with_auth(_return, seqid); } -int32_t 
ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_with_auth(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_by_name_pargs args; + ThriftHiveMetastore_get_partitions_with_auth_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.deleteData = &deleteData; + args.max_parts = &max_parts; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64426,7 +67325,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name(const s return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vector<Partition> & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -64455,7 +67354,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int3 iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partition_by_name") != 0) { + if (fname.compare("get_partitions_with_auth") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64464,16 +67363,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int3 using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_partition_by_name_presult result; + ThriftHiveMetastore_get_partitions_with_auth_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -64484,7 +67383,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int3 throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_with_auth failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64494,116 +67393,22 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int3 } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::get_partitions_pspec(std::vector<PartitionSpec> & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) { - 
int32_t seqid = send_drop_partition_by_name_with_environment_context(db_name, tbl_name, part_name, deleteData, environment_context); - return recv_drop_partition_by_name_with_environment_context(seqid); + int32_t seqid = send_get_partitions_pspec(db_name, tbl_name, max_parts); + recv_get_partitions_pspec(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_pspec(const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_by_name_with_environment_context_pargs args; + ThriftHiveMetastore_get_partitions_pspec_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.deleteData = &deleteData; - args.environment_context = &environment_context; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name_with_environment_context(const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("drop_partition_by_name_with_environment_context") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); - } - bool _return; - ThriftHiveMetastore_drop_partition_by_name_with_environment_context_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - sentry.commit(); - return _return; - } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_by_name_with_environment_context failed: unknown result"); - } - // seqid != rseqid - 
this->sync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -void ThriftHiveMetastoreConcurrentClient::drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req) -{ - int32_t seqid = send_drop_partitions_req(req); - recv_drop_partitions_req(_return, seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partitions_req(const DropPartitionsRequest& req) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_drop_partitions_req_pargs args; - args.req = &req; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64614,7 +67419,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partitions_req(const Drop return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartitionsResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector<PartitionSpec> & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -64643,7 +67448,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartition iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partitions_req") != 0) { + if (fname.compare("get_partitions_pspec") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64652,7 +67457,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartition using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_partitions_req_presult result; + ThriftHiveMetastore_get_partitions_pspec_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -64672,7 +67477,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartition throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partitions_req failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_pspec failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64682,22 +67487,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartition } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals) +void ThriftHiveMetastoreConcurrentClient::get_partition_names(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) { - int32_t seqid = send_get_partition(db_name, tbl_name, part_vals); - recv_get_partition(_return, seqid); + int32_t seqid = send_get_partition_names(db_name, tbl_name, max_parts); + recv_get_partition_names(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names(const std::string& db_name, 
-void ThriftHiveMetastoreConcurrentClient::get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals)
+void ThriftHiveMetastoreConcurrentClient::get_partition_names(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts)
 {
-  int32_t seqid = send_get_partition(db_name, tbl_name, part_vals);
-  recv_get_partition(_return, seqid);
+  int32_t seqid = send_get_partition_names(db_name, tbl_name, max_parts);
+  recv_get_partition_names(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_partition", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_partition_names", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_partition_pargs args;
+  ThriftHiveMetastore_get_partition_names_pargs args;
   args.db_name = &db_name;
   args.tbl_name = &tbl_name;
-  args.part_vals = &part_vals;
+  args.max_parts = &max_parts;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -64708,7 +67513,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition(const std::strin
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names(std::vector<std::string> & _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -64737,7 +67542,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return,
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_partition") != 0) {
+    if (fname.compare("get_partition_names") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -64746,7 +67551,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return,
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_get_partition_presult result;
+    ThriftHiveMetastore_get_partition_names_presult result;
     result.success = &_return;
     result.read(iprot_);
     iprot_->readMessageEnd();
@@ -64766,7 +67571,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return,
       throw result.o2;
     }
     // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition failed: unknown result");
+    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names failed: unknown result");
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
@@ -64776,24 +67581,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return,
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::exchange_partition(Partition& _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name)
+void ThriftHiveMetastoreConcurrentClient::get_partition_values(PartitionValuesResponse& _return, const PartitionValuesRequest& request)
 {
-  int32_t seqid = send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name);
-  recv_exchange_partition(_return, seqid);
+  int32_t seqid = send_get_partition_values(request);
+  recv_get_partition_values(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partition(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_values(const PartitionValuesRequest& request)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("exchange_partition", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_partition_values", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_exchange_partition_pargs args;
-  args.partitionSpecs = &partitionSpecs;
-  args.source_db = &source_db;
-  args.source_table_name = &source_table_name;
-  args.dest_db = &dest_db;
-  args.dest_table_name = &dest_table_name;
+  ThriftHiveMetastore_get_partition_values_pargs args;
+  args.request = &request;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -64804,7 +67605,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partition(const std::
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_partition_values(PartitionValuesResponse& _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -64833,7 +67634,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _re
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("exchange_partition") != 0) {
+    if (fname.compare("get_partition_values") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -64842,7 +67643,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _re
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_exchange_partition_presult result;
+    ThriftHiveMetastore_get_partition_values_presult result;
     result.success = &_return;
     result.read(iprot_);
     iprot_->readMessageEnd();
@@ -64861,16 +67662,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _re
       sentry.commit();
       throw result.o2;
     }
-    if (result.__isset.o3) {
-      sentry.commit();
-      throw result.o3;
-    }
-    if (result.__isset.o4) {
-      sentry.commit();
-      throw result.o4;
-    }
     // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "exchange_partition failed: unknown result");
+    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_values failed: unknown result");
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
@@ -64880,24 +67673,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _re
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name)
+void ThriftHiveMetastoreConcurrentClient::get_partitions_ps(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts)
 {
-  int32_t seqid = send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name);
-  recv_exchange_partitions(_return, seqid);
+  int32_t seqid = send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts);
+  recv_get_partitions_ps(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partitions(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("exchange_partitions", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_partitions_ps", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_exchange_partitions_pargs args;
-  args.partitionSpecs = &partitionSpecs;
-  args.source_db = &source_db;
-  args.source_table_name = &source_table_name;
-  args.dest_db = &dest_db;
-  args.dest_table_name = &dest_table_name;
+  ThriftHiveMetastore_get_partitions_ps_pargs args;
+  args.db_name = &db_name;
+  args.tbl_name = &tbl_name;
+  args.part_vals = &part_vals;
+  args.max_parts = &max_parts;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -64908,7 +67700,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partitions(const std:
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector<Partition> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vector<Partition> & _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -64937,7 +67729,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("exchange_partitions") != 0) {
+    if (fname.compare("get_partitions_ps") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -64946,7 +67738,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_exchange_partitions_presult result;
+    ThriftHiveMetastore_get_partitions_ps_presult result;
     result.success = &_return;
     result.read(iprot_);
     iprot_->readMessageEnd();
@@ -64965,16 +67757,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector
       sentry.commit();
       throw result.o2;
     }
-    if (result.__isset.o3) {
-      sentry.commit();
-      throw result.o3;
-    }
-    if (result.__isset.o4) {
-      sentry.commit();
-      throw result.o4;
-    }
     // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "exchange_partitions failed: unknown result");
+    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_ps failed: unknown result");
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
@@ -64984,22 +67768,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names)
+void ThriftHiveMetastoreConcurrentClient::get_partitions_ps_with_auth(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names)
 {
-  int32_t seqid = send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names);
-  recv_get_partition_with_auth(_return, seqid);
+  int32_t seqid = send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names);
+  recv_get_partitions_ps_with_auth(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_partition_with_auth", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_partitions_ps_with_auth", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_partition_with_auth_pargs args;
+  ThriftHiveMetastore_get_partitions_ps_with_auth_pargs args;
   args.db_name = &db_name;
   args.tbl_name = &tbl_name;
   args.part_vals = &part_vals;
+  args.max_parts = &max_parts;
   args.user_name = &user_name;
   args.group_names = &group_names;
   args.write(oprot_);
@@ -65012,7 +67797,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_with_auth(const
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition& _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std::vector<Partition> & _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -65041,7 +67826,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_partition_with_auth") != 0) {
+    if (fname.compare("get_partitions_ps_with_auth") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -65050,7 +67835,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_get_partition_with_auth_presult result;
+    ThriftHiveMetastore_get_partitions_ps_with_auth_presult result;
     result.success = &_return;
     result.read(iprot_);
     iprot_->readMessageEnd();
@@ -65070,7 +67855,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition
       throw result.o2;
     }
     // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_with_auth failed: unknown result");
+    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_ps_with_auth failed: unknown result");
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
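A detail worth seeing once: the generated pargs structs hold const pointers into the caller's arguments, and the presult structs point `success` at the caller's `_return`, so serialization and deserialization each touch the data exactly once, with `__isset` flags recording which slot of the reply actually arrived. The sketch below is a toy mirror of that wiring (ToyPargs/ToyPresult are invented names, not the generated classes):

```cpp
#include <string>
#include <vector>

// Toy mirror of the pargs/presult convention (illustration only):
// requests keep const pointers to the caller's arguments, replies keep
// a mutable pointer to the caller's _return.
struct ToyPargs {
  const std::string* db_name;
  const std::vector<std::string>* part_vals;  // no copy of the caller's vector
};

struct ToyIsset { bool success = false; bool o1 = false; };

struct ToyPresult {
  std::vector<std::string>* success;  // points at the caller's _return
  ToyIsset __isset;
  void read(const std::vector<std::string>& wire) {
    *success = wire;         // fill the caller's container in place
    __isset.success = true;  // record which union arm arrived
  }
};

int main() {
  std::vector<std::string> _return;
  ToyPresult result;
  result.success = &_return;  // same wiring as recv_get_partitions_ps above
  result.read(std::vector<std::string>(1, "ds=2017-10-01"));
  return (result.__isset.success && _return.size() == 1) ? 0 : 1;
}
```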
@@ -65080,22 +67865,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name)
+void ThriftHiveMetastoreConcurrentClient::get_partition_names_ps(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts)
 {
-  int32_t seqid = send_get_partition_by_name(db_name, tbl_name, part_name);
-  recv_get_partition_by_name(_return, seqid);
+  int32_t seqid = send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts);
+  recv_get_partition_names_ps(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_partition_names_ps", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_partition_by_name_pargs args;
+  ThriftHiveMetastore_get_partition_names_ps_pargs args;
   args.db_name = &db_name;
   args.tbl_name = &tbl_name;
-  args.part_name = &part_name;
+  args.part_vals = &part_vals;
+  args.max_parts = &max_parts;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -65106,7 +67892,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_by_name(const st
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition& _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vector<std::string> & _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -65135,7 +67921,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition&
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_partition_by_name") != 0) {
+    if (fname.compare("get_partition_names_ps") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -65144,7 +67930,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition&
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_get_partition_by_name_presult result;
+    ThriftHiveMetastore_get_partition_names_ps_presult result;
     result.success = &_return;
     result.read(iprot_);
     iprot_->readMessageEnd();
@@ -65164,7 +67950,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition&
       throw result.o2;
     }
     // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_by_name failed: unknown result");
+    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names_ps failed: unknown result");
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
@@ -65174,21 +67960,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition&
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_partitions(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts)
+void ThriftHiveMetastoreConcurrentClient::get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts)
 {
-  int32_t seqid = send_get_partitions(db_name, tbl_name, max_parts);
-  recv_get_partitions(_return, seqid);
+  int32_t seqid = send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts);
+  recv_get_partitions_by_filter(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_partitions", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_partitions_pargs args;
+  ThriftHiveMetastore_get_partitions_by_filter_pargs args;
   args.db_name = &db_name;
   args.tbl_name = &tbl_name;
+  args.filter = &filter;
   args.max_parts = &max_parts;
   args.write(oprot_);

@@ -65200,7 +67987,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions(const std::stri
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vector<Partition> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vector<Partition> & _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -65229,7 +68016,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vector
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_partitions") != 0) {
+    if (fname.compare("get_partitions_by_filter") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -65238,7 +68025,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vector
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_get_partitions_presult result;
+    ThriftHiveMetastore_get_partitions_by_filter_presult result;
     result.success = &_return;
     result.read(iprot_);
     iprot_->readMessageEnd();
@@ -65258,7 +68045,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vector
       throw result.o2;
     }
     // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions failed: unknown result");
+    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_filter failed: unknown result");
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
@@ -65268,24 +68055,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vector
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_partitions_with_auth(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names)
+void ThriftHiveMetastoreConcurrentClient::get_part_specs_by_filter(std::vector<PartitionSpec> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts)
 {
-  int32_t seqid = send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names);
-  recv_get_partitions_with_auth(_return, seqid);
+  int32_t seqid = send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts);
+  recv_get_part_specs_by_filter(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_with_auth(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_part_specs_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_partitions_with_auth", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_part_specs_by_filter", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_partitions_with_auth_pargs args;
+  ThriftHiveMetastore_get_part_specs_by_filter_pargs args;
   args.db_name = &db_name;
   args.tbl_name = &tbl_name;
+  args.filter = &filter;
   args.max_parts = &max_parts;
-  args.user_name = &user_name;
-  args.group_names = &group_names;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -65296,7 +68082,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_with_auth(const
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vector<Partition> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vector<PartitionSpec> & _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -65325,7 +68111,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vec
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_partitions_with_auth") != 0) {
+    if (fname.compare("get_part_specs_by_filter") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -65334,7 +68120,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vec
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_get_partitions_with_auth_presult result;
+    ThriftHiveMetastore_get_part_specs_by_filter_presult result;
     result.success = &_return;
     result.read(iprot_);
     iprot_->readMessageEnd();
@@ -65354,7 +68140,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vec
       throw result.o2;
     }
     // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_with_auth failed: unknown result");
+    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_part_specs_by_filter failed: unknown result");
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
@@ -65364,22 +68150,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vec
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_partitions_pspec(std::vector<PartitionSpec> & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts)
+void ThriftHiveMetastoreConcurrentClient::get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req)
 {
-  int32_t seqid = send_get_partitions_pspec(db_name, tbl_name, max_parts);
-  recv_get_partitions_pspec(_return, seqid);
+  int32_t seqid = send_get_partitions_by_expr(req);
+  recv_get_partitions_by_expr(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_pspec(const std::string& db_name, const std::string& tbl_name, const int32_t max_parts)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_expr(const PartitionsByExprRequest& req)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_partitions_by_expr", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_partitions_pspec_pargs args;
-  args.db_name = &db_name;
-  args.tbl_name = &tbl_name;
-  args.max_parts = &max_parts;
+  ThriftHiveMetastore_get_partitions_by_expr_pargs args;
+  args.req = &req;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -65390,7 +68174,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_pspec(const std
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector<PartitionSpec> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(PartitionsByExprResult& _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -65419,7 +68203,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector<
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_partitions_pspec") != 0) {
+    if (fname.compare("get_partitions_by_expr") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -65428,7 +68212,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector<
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_get_partitions_pspec_presult result;
+    ThriftHiveMetastore_get_partitions_by_expr_presult result;
     result.success = &_return;
     result.read(iprot_);
     iprot_->readMessageEnd();
@@ -65448,7 +68232,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector<
       throw result.o2;
     }
     // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_pspec failed: unknown result");
+    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_expr failed: unknown result");
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
@@ -65458,22 +68242,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector<
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_partition_names(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts)
+int32_t ThriftHiveMetastoreConcurrentClient::get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter)
 {
-  int32_t seqid = send_get_partition_names(db_name, tbl_name, max_parts);
-  recv_get_partition_names(_return, seqid);
+  int32_t seqid = send_get_num_partitions_by_filter(db_name, tbl_name, filter);
+  return recv_get_num_partitions_by_filter(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_partition_names", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_num_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_partition_names_pargs args;
+  ThriftHiveMetastore_get_num_partitions_by_filter_pargs args;
   args.db_name = &db_name;
   args.tbl_name = &tbl_name;
-  args.max_parts = &max_parts;
+  args.filter = &filter;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -65484,7 +68268,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names(const std:
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names(std::vector<std::string> & _return, const int32_t seqid)
+int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -65513,7 +68297,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names(std::vector
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_partition_names") != 0) {
+    if (fname.compare("get_num_partitions_by_filter") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -65522,16 +68306,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names(std::vector
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_get_partition_names_presult result;
+    int32_t _return;
+    ThriftHiveMetastore_get_num_partitions_by_filter_presult result;
     result.success = &_return;
     result.read(iprot_);
     iprot_->readMessageEnd();
     iprot_->getTransport()->readEnd();

     if (result.__isset.success) {
-      // _return pointer has now been filled
       sentry.commit();
-      return;
+      return _return;
     }
     if (result.__isset.o1) {
       sentry.commit();
@@ -65542,7 +68326,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names(std::vector
       throw result.o2;
     }
     // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names failed: unknown result");
+    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_num_partitions_by_filter failed: unknown result");
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
@@ -65552,20 +68336,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names(std::vector
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_partition_values(PartitionValuesResponse& _return, const PartitionValuesRequest& request)
+void ThriftHiveMetastoreConcurrentClient::get_partitions_by_names(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names)
 {
-  int32_t seqid = send_get_partition_values(request);
-  recv_get_partition_values(_return, seqid);
+  int32_t seqid = send_get_partitions_by_names(db_name, tbl_name, names);
+  recv_get_partitions_by_names(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_values(const PartitionValuesRequest& request)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_partition_values", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_partitions_by_names", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_partition_values_pargs args;
-  args.request = &request;
+  ThriftHiveMetastore_get_partitions_by_names_pargs args;
+  args.db_name = &db_name;
+  args.tbl_name = &tbl_name;
+  args.names = &names;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -65576,7 +68362,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_values(const Par
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_partition_values(PartitionValuesResponse& _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vector<Partition> & _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -65605,7 +68391,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_values(PartitionVal
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_partition_values") != 0) {
+    if (fname.compare("get_partitions_by_names") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
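The hunks above also show the two return conventions of the generated client: recv_get_num_partitions_by_filter declares a local `int32_t _return` and hands it back by value, while container- and struct-valued calls such as recv_get_partition_names fill a caller-supplied `_return` out-parameter and return void. A minimal, self-contained contrast (toy functions with invented names, not the generated code):

```cpp
#include <cstdint>
#include <string>
#include <vector>

// By-value convention for primitives: recv_* owns a local _return.
int32_t recv_like_get_num_partitions_by_filter(int32_t wire_value) {
  int32_t _return = wire_value;  // local, declared inside recv_*
  return _return;
}

// Out-parameter convention for containers/structs: the caller's object
// is filled in place and nothing is returned.
void recv_like_get_partition_names(std::vector<std::string>& _return,
                                   const std::vector<std::string>& wire) {
  _return = wire;
}

int main() {
  std::vector<std::string> names;
  recv_like_get_partition_names(names, std::vector<std::string>(2, "p"));
  return (recv_like_get_num_partitions_by_filter(2) == 2 &&
          names.size() == 2) ? 0 : 1;
}
```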
@@ -65614,7 +68400,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_values(PartitionVal
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_get_partition_values_presult result;
+    ThriftHiveMetastore_get_partitions_by_names_presult result;
     result.success = &_return;
     result.read(iprot_);
     iprot_->readMessageEnd();
@@ -65634,7 +68420,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_values(PartitionVal
       throw result.o2;
     }
     // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_values failed: unknown result");
+    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_names failed: unknown result");
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
@@ -65644,23 +68430,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_values(PartitionVal
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_partitions_ps(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts)
+void ThriftHiveMetastoreConcurrentClient::alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part)
 {
-  int32_t seqid = send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts);
-  recv_get_partitions_ps(_return, seqid);
+  int32_t seqid = send_alter_partition(db_name, tbl_name, new_part);
+  recv_alter_partition(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts)
+int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_partitions_ps", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("alter_partition", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_partitions_ps_pargs args;
+  ThriftHiveMetastore_alter_partition_pargs args;
   args.db_name = &db_name;
   args.tbl_name = &tbl_name;
-  args.part_vals = &part_vals;
-  args.max_parts = &max_parts;
+  args.new_part = &new_part;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -65671,7 +68456,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps(const std::s
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vector<Partition> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -65700,7 +68485,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vector
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_partitions_ps") != 0) {
+    if (fname.compare("alter_partition") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -65709,17 +68494,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vector
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_get_partitions_ps_presult result;
-    result.success = &_return;
+    ThriftHiveMetastore_alter_partition_presult result;
     result.read(iprot_);
     iprot_->readMessageEnd();
     iprot_->getTransport()->readEnd();

-    if (result.__isset.success) {
-      // _return pointer has now been filled
-      sentry.commit();
-      return;
-    }
     if (result.__isset.o1) {
       sentry.commit();
       throw result.o1;
@@ -65728,8 +68507,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vector
       sentry.commit();
       throw result.o2;
     }
-    // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_ps failed: unknown result");
+    sentry.commit();
+    return;
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
@@ -65739,25 +68518,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vector
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_partitions_ps_with_auth(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names)
+void ThriftHiveMetastoreConcurrentClient::alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts)
 {
-  int32_t seqid = send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names);
-  recv_get_partitions_ps_with_auth(_return, seqid);
+  int32_t seqid = send_alter_partitions(db_name, tbl_name, new_parts);
+  recv_alter_partitions(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names)
+int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_partitions_ps_with_auth", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("alter_partitions", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_partitions_ps_with_auth_pargs args;
+  ThriftHiveMetastore_alter_partitions_pargs args;
   args.db_name = &db_name;
   args.tbl_name = &tbl_name;
-  args.part_vals = &part_vals;
-  args.max_parts = &max_parts;
-  args.user_name = &user_name;
-  args.group_names = &group_names;
+  args.new_parts = &new_parts;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -65768,7 +68544,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps_with_auth(co
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std::vector<Partition> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -65797,7 +68573,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std::
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_partitions_ps_with_auth") != 0) {
+    if (fname.compare("alter_partitions") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -65806,17 +68582,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std::
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_get_partitions_ps_with_auth_presult result;
-    result.success = &_return;
+    ThriftHiveMetastore_alter_partitions_presult result;
     result.read(iprot_);
     iprot_->readMessageEnd();
     iprot_->getTransport()->readEnd();

-    if (result.__isset.success) {
-      // _return pointer has now been filled
-      sentry.commit();
-      return;
-    }
     if (result.__isset.o1) {
       sentry.commit();
       throw result.o1;
@@ -65825,8 +68595,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std::
       sentry.commit();
       throw result.o2;
     }
-    // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_ps_with_auth failed: unknown result");
+    sentry.commit();
+    return;
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
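These - / + pairs also change the failure contract: the old value-returning receivers treated a T_REPLY with no populated result slot as MISSING_RESULT, while the new void receivers (recv_alter_partition, recv_alter_partitions, and the variants below) simply commit and return, because a void method's presult has no success field and an empty reply is the normal outcome. A toy model of that control-flow difference (names invented, not the generated code):

```cpp
#include <stdexcept>

// Minimal sketch of the two recv_* shapes. has_success/has_o1 stand in
// for the generated __isset flags.
struct ToyResult {
  bool has_success = false;
  bool has_o1 = false;
};

int recv_value_style(const ToyResult& r) {
  if (r.has_success) return 42;                  // normal completion
  if (r.has_o1) throw std::runtime_error("o1");  // declared exception
  throw std::runtime_error("missing result");    // empty reply is an error
}

void recv_void_style(const ToyResult& r) {
  if (r.has_o1) throw std::runtime_error("o1");
  return;  // nothing else to check: absence of an exception IS success
}

int main() {
  ToyResult empty_reply;         // no slots set
  recv_void_style(empty_reply);  // fine for void methods
  ToyResult ok;
  ok.has_success = true;
  return recv_value_style(ok) == 42 ? 0 : 1;
}
```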
@@ -65836,23 +68606,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std::
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_partition_names_ps(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts)
+void ThriftHiveMetastoreConcurrentClient::alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context)
 {
-  int32_t seqid = send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts);
-  recv_get_partition_names_ps(_return, seqid);
+  int32_t seqid = send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context);
+  recv_alter_partitions_with_environment_context(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts)
+int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_partition_names_ps", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_partition_names_ps_pargs args;
+  ThriftHiveMetastore_alter_partitions_with_environment_context_pargs args;
   args.db_name = &db_name;
   args.tbl_name = &tbl_name;
-  args.part_vals = &part_vals;
-  args.max_parts = &max_parts;
+  args.new_parts = &new_parts;
+  args.environment_context = &environment_context;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -65863,7 +68633,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names_ps(const s
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vector<std::string> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment_context(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -65892,7 +68662,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vecto
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_partition_names_ps") != 0) {
+    if (fname.compare("alter_partitions_with_environment_context") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -65901,17 +68671,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vecto
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_get_partition_names_ps_presult result;
-    result.success = &_return;
+    ThriftHiveMetastore_alter_partitions_with_environment_context_presult result;
    result.read(iprot_);
     iprot_->readMessageEnd();
     iprot_->getTransport()->readEnd();

-    if (result.__isset.success) {
-      // _return pointer has now been filled
-      sentry.commit();
-      return;
-    }
     if (result.__isset.o1) {
       sentry.commit();
       throw result.o1;
@@ -65920,8 +68684,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vecto
       sentry.commit();
       throw result.o2;
     }
-    // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names_ps failed: unknown result");
+    sentry.commit();
+    return;
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
@@ -65931,23 +68695,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vecto
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts)
+void ThriftHiveMetastoreConcurrentClient::alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context)
 {
-  int32_t seqid = send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts);
-  recv_get_partitions_by_filter(_return, seqid);
+  int32_t seqid = send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context);
+  recv_alter_partition_with_environment_context(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts)
+int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("alter_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_partitions_by_filter_pargs args;
+  ThriftHiveMetastore_alter_partition_with_environment_context_pargs args;
   args.db_name = &db_name;
   args.tbl_name = &tbl_name;
-  args.filter = &filter;
-  args.max_parts = &max_parts;
+  args.new_part = &new_part;
+  args.environment_context = &environment_context;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -65958,7 +68722,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_filter(const
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vector<Partition> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_context(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -65987,7 +68751,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vec
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_partitions_by_filter") != 0) {
+    if (fname.compare("alter_partition_with_environment_context") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -65996,17 +68760,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vec
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_get_partitions_by_filter_presult result;
-    result.success = &_return;
+    ThriftHiveMetastore_alter_partition_with_environment_context_presult result;
     result.read(iprot_);
     iprot_->readMessageEnd();
     iprot_->getTransport()->readEnd();

-    if (result.__isset.success) {
-      // _return pointer has now been filled
-      sentry.commit();
-      return;
-    }
     if (result.__isset.o1) {
       sentry.commit();
       throw result.o1;
@@ -66015,8 +68773,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vec
       sentry.commit();
       throw result.o2;
     }
-    // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_filter failed: unknown result");
+    sentry.commit();
+    return;
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
@@ -66026,23 +68784,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vec
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_part_specs_by_filter(std::vector<PartitionSpec> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts)
+void ThriftHiveMetastoreConcurrentClient::rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const Partition& new_part)
 {
-  int32_t seqid = send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts);
-  recv_get_part_specs_by_filter(_return, seqid);
+  int32_t seqid = send_rename_partition(db_name, tbl_name, part_vals, new_part);
+  recv_rename_partition(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_part_specs_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts)
+int32_t ThriftHiveMetastoreConcurrentClient::send_rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const Partition& new_part)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_part_specs_by_filter", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("rename_partition", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_part_specs_by_filter_pargs args;
+  ThriftHiveMetastore_rename_partition_pargs args;
   args.db_name = &db_name;
   args.tbl_name = &tbl_name;
-  args.filter = &filter;
-  args.max_parts = &max_parts;
+  args.part_vals = &part_vals;
+  args.new_part = &new_part;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -66053,7 +68811,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_part_specs_by_filter(const
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vector<PartitionSpec> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -66082,7 +68840,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vec
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_part_specs_by_filter") != 0) {
+    if (fname.compare("rename_partition") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -66091,17 +68849,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vec
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_get_part_specs_by_filter_presult result;
-    result.success = &_return;
+    ThriftHiveMetastore_rename_partition_presult result;
     result.read(iprot_);
     iprot_->readMessageEnd();
     iprot_->getTransport()->readEnd();

-    if (result.__isset.success) {
-      // _return pointer has now been filled
-      sentry.commit();
-      return;
-    }
     if (result.__isset.o1) {
       sentry.commit();
       throw result.o1;
@@ -66110,8 +68862,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vec
       sentry.commit();
       throw result.o2;
     }
-    // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_part_specs_by_filter failed: unknown result");
+    sentry.commit();
+    return;
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
@@ -66121,20 +68873,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vec
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req)
+bool ThriftHiveMetastoreConcurrentClient::partition_name_has_valid_characters(const std::vector<std::string> & part_vals, const bool throw_exception)
 {
-  int32_t seqid = send_get_partitions_by_expr(req);
-  recv_get_partitions_by_expr(_return, seqid);
+  int32_t seqid = send_partition_name_has_valid_characters(part_vals, throw_exception);
+  return recv_partition_name_has_valid_characters(seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_expr(const PartitionsByExprRequest& req)
+int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_has_valid_characters(const std::vector<std::string> & part_vals, const bool throw_exception)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_partitions_by_expr", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("partition_name_has_valid_characters", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_partitions_by_expr_pargs args;
-  args.req = &req;
+  ThriftHiveMetastore_partition_name_has_valid_characters_pargs args;
+  args.part_vals = &part_vals;
+  args.throw_exception = &throw_exception;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -66145,7 +68898,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_expr(const P
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(PartitionsByExprResult& _return, const int32_t seqid)
+bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characters(const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -66174,7 +68927,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(Partitions
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_partitions_by_expr") != 0) {
+    if (fname.compare("partition_name_has_valid_characters") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -66183,27 +68936,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(Partitions
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_get_partitions_by_expr_presult result;
+    bool _return;
+    ThriftHiveMetastore_partition_name_has_valid_characters_presult result;
     result.success = &_return;
     result.read(iprot_);
     iprot_->readMessageEnd();
     iprot_->getTransport()->readEnd();

     if (result.__isset.success) {
-      // _return pointer has now been filled
       sentry.commit();
-      return;
+      return _return;
     }
     if (result.__isset.o1) {
       sentry.commit();
       throw result.o1;
     }
-    if (result.__isset.o2) {
-      sentry.commit();
-      throw result.o2;
-    }
     // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_expr failed: unknown result");
+    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_has_valid_characters failed: unknown result");
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
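For orientation, this is roughly how the regenerated concurrent client is exercised by a caller. A hedged sketch only, assuming a Thrift 0.9.x-era toolchain (boost::shared_ptr), the generated ThriftHiveMetastore.h from this module, and a metastore listening on localhost:9083 — none of which this patch itself establishes:

```cpp
#include <iostream>
#include <string>
#include <vector>
#include <boost/shared_ptr.hpp>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TBufferTransports.h>
#include <thrift/transport/TSocket.h>
#include "ThriftHiveMetastore.h"

using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using namespace Apache::Hadoop::Hive;

int main() {
  boost::shared_ptr<TSocket> socket(new TSocket("localhost", 9083));
  boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
  boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
  ThriftHiveMetastoreConcurrentClient client(protocol);
  try {
    transport->open();
    // The bool-returning call from the hunk above: serialized by send_*,
    // demultiplexed by recv_* on the shared connection.
    std::vector<std::string> vals;
    vals.push_back("ds=2017-10-01");
    bool ok = client.partition_name_has_valid_characters(vals, false);
    std::cout << "valid: " << ok << std::endl;
    transport->close();
  } catch (const MetaException& e) {
    // The o1 slot of the presult, rethrown by the generated recv_* code.
    std::cerr << "metastore error: " << e.message << std::endl;
  }
  return 0;
}
```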
@@ -66213,22 +68962,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(Partitions
   } // end while(true)
 }

-int32_t ThriftHiveMetastoreConcurrentClient::get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter)
+void ThriftHiveMetastoreConcurrentClient::get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue)
 {
-  int32_t seqid = send_get_num_partitions_by_filter(db_name, tbl_name, filter);
-  return recv_get_num_partitions_by_filter(seqid);
+  int32_t seqid = send_get_config_value(name, defaultValue);
+  recv_get_config_value(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_config_value(const std::string& name, const std::string& defaultValue)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_num_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_config_value", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_num_partitions_by_filter_pargs args;
-  args.db_name = &db_name;
-  args.tbl_name = &tbl_name;
-  args.filter = &filter;
+  ThriftHiveMetastore_get_config_value_pargs args;
+  args.name = &name;
+  args.defaultValue = &defaultValue;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -66239,7 +68987,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_num_partitions_by_filter(c
   return cseqid;
 }

-int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -66268,7 +69016,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(c
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_num_partitions_by_filter") != 0) {
+    if (fname.compare("get_config_value") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -66277,27 +69025,23 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(c
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    int32_t _return;
-    ThriftHiveMetastore_get_num_partitions_by_filter_presult result;
+    ThriftHiveMetastore_get_config_value_presult result;
     result.success = &_return;
     result.read(iprot_);
     iprot_->readMessageEnd();
     iprot_->getTransport()->readEnd();

     if (result.__isset.success) {
+      // _return pointer has now been filled
       sentry.commit();
-      return _return;
+      return;
     }
     if (result.__isset.o1) {
       sentry.commit();
       throw result.o1;
     }
-    if (result.__isset.o2) {
-      sentry.commit();
-      throw result.o2;
-    }
     // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_num_partitions_by_filter failed: unknown result");
+    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_config_value failed: unknown result");
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
@@ -66307,22 +69051,20 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(c
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::get_partitions_by_names(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names)
+void ThriftHiveMetastoreConcurrentClient::partition_name_to_vals(std::vector<std::string> & _return, const std::string& part_name)
 {
-  int32_t seqid = send_get_partitions_by_names(db_name, tbl_name, names);
-  recv_get_partitions_by_names(_return, seqid);
+  int32_t seqid = send_partition_name_to_vals(part_name);
+  recv_partition_name_to_vals(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names)
+int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_vals(const std::string& part_name)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_partitions_by_names", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("partition_name_to_vals", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_get_partitions_by_names_pargs args;
-  args.db_name = &db_name;
-  args.tbl_name = &tbl_name;
-  args.names = &names;
+  ThriftHiveMetastore_partition_name_to_vals_pargs args;
+  args.part_name = &part_name;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -66333,7 +69075,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_names(const
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vector<Partition> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vector<std::string> & _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -66362,7 +69104,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vect
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("get_partitions_by_names") != 0) {
+    if (fname.compare("partition_name_to_vals") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -66371,7 +69113,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vect
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_get_partitions_by_names_presult result;
+    ThriftHiveMetastore_partition_name_to_vals_presult result;
     result.success = &_return;
     result.read(iprot_);
     iprot_->readMessageEnd();
@@ -66386,12 +69128,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vect
       sentry.commit();
       throw result.o1;
     }
-    if (result.__isset.o2) {
-      sentry.commit();
-      throw result.o2;
-    }
     // in a bad state, don't commit
-    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_names failed: unknown result");
+    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_to_vals failed: unknown result");
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype, rseqid);
@@ -66401,22 +69139,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vect
   } // end while(true)
 }

-void ThriftHiveMetastoreConcurrentClient::alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part)
+void ThriftHiveMetastoreConcurrentClient::partition_name_to_spec(std::map<std::string, std::string> & _return, const std::string& part_name)
 {
-  int32_t seqid = send_alter_partition(db_name, tbl_name, new_part);
-  recv_alter_partition(seqid);
+  int32_t seqid = send_partition_name_to_spec(part_name);
+  recv_partition_name_to_spec(_return, seqid);
 }

-int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part)
+int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_spec(const std::string& part_name)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("alter_partition", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("partition_name_to_spec", ::apache::thrift::protocol::T_CALL, cseqid);

-  ThriftHiveMetastore_alter_partition_pargs args;
-  args.db_name = &db_name;
-  args.tbl_name = &tbl_name;
-  args.new_part = &new_part;
+  ThriftHiveMetastore_partition_name_to_spec_pargs args;
+  args.part_name = &part_name;
   args.write(oprot_);

   oprot_->writeMessageEnd();
@@ -66427,7 +69163,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition(const std::str
   return cseqid;
 }

-void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::map<std::string, std::string> & _return, const int32_t seqid)
 {

   int32_t rseqid = 0;
@@ -66456,7 +69192,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seq
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
     }
-    if (fname.compare("alter_partition") != 0) {
+    if (fname.compare("partition_name_to_spec") != 0) {
       iprot_->skip(::apache::thrift::protocol::T_STRUCT);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
@@ -66465,21 +69201,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seq
       using ::apache::thrift::protocol::TProtocolException;
       throw TProtocolException(TProtocolException::INVALID_DATA);
     }
-    ThriftHiveMetastore_alter_partition_presult result;
+    ThriftHiveMetastore_partition_name_to_spec_presult result;
+    result.success = &_return;
     result.read(iprot_);
     iprot_->readMessageEnd();
     iprot_->getTransport()->readEnd();

-    if (result.__isset.o1) {
+    if (result.__isset.success) {
+      // _return pointer has now been filled
       sentry.commit();
-      throw result.o1;
+      return;
     }
-    if (result.__isset.o2) {
+    if (result.__isset.o1) {
       sentry.commit();
-      throw result.o2;
+      throw result.o1;
     }
-    sentry.commit();
-    return;
+    // in a bad state, don't commit
+    throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_to_spec failed: unknown result");
   }
   // seqid != rseqid
   this->sync_.updatePending(fname, mtype,
rseqid); @@ -66489,22 +69227,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seq } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) +void ThriftHiveMetastoreConcurrentClient::markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { - int32_t seqid = send_alter_partitions(db_name, tbl_name, new_parts); - recv_alter_partitions(seqid); + int32_t seqid = send_markPartitionForEvent(db_name, tbl_name, part_vals, eventType); + recv_markPartitionForEvent(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("markPartitionForEvent", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_partitions_pargs args; + ThriftHiveMetastore_markPartitionForEvent_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.new_parts = &new_parts; + args.part_vals = &part_vals; + args.eventType = &eventType; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66515,7 +69254,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions(const std::st return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32_t seqid) { int32_t rseqid = 0; @@ -66544,7 +69283,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t se iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_partitions") != 0) { + if (fname.compare("markPartitionForEvent") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66553,7 +69292,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t se using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_partitions_presult result; + ThriftHiveMetastore_markPartitionForEvent_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66566,6 +69305,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t se sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } + if (result.__isset.o5) { + sentry.commit(); + throw result.o5; + } + if (result.__isset.o6) { + sentry.commit(); + throw result.o6; + } sentry.commit(); return; } @@ -66577,23 +69332,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t se } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const 
EnvironmentContext& environment_context) +bool ThriftHiveMetastoreConcurrentClient::isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { - int32_t seqid = send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); - recv_alter_partitions_with_environment_context(seqid); + int32_t seqid = send_isPartitionMarkedForEvent(db_name, tbl_name, part_vals, eventType); + return recv_isPartitionMarkedForEvent(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("isPartitionMarkedForEvent", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_partitions_with_environment_context_pargs args; + ThriftHiveMetastore_isPartitionMarkedForEvent_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.new_parts = &new_parts; - args.environment_context = &environment_context; + args.part_vals = &part_vals; + args.eventType = &eventType; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66604,7 +69359,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions_with_environm return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment_context(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const int32_t seqid) { int32_t rseqid = 0; @@ -66633,7 +69388,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_partitions_with_environment_context") != 0) { + if (fname.compare("isPartitionMarkedForEvent") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66642,11 +69397,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_partitions_with_environment_context_presult result; + bool _return; + ThriftHiveMetastore_isPartitionMarkedForEvent_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + sentry.commit(); + return _return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -66655,8 +69416,24 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment sentry.commit(); throw result.o2; } - sentry.commit(); - return; + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } + if (result.__isset.o5) { + sentry.commit(); + throw result.o5; + } + if (result.__isset.o6) { + sentry.commit(); + throw result.o6; + } + // in a bad 
state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "isPartitionMarkedForEvent failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66666,23 +69443,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::add_index(Index& _return, const Index& new_index, const Table& index_table) { - int32_t seqid = send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context); - recv_alter_partition_with_environment_context(seqid); + int32_t seqid = send_add_index(new_index, index_table); + recv_add_index(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_index(const Index& new_index, const Table& index_table) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_index", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_partition_with_environment_context_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.new_part = &new_part; - args.environment_context = &environment_context; + ThriftHiveMetastore_add_index_pargs args; + args.new_index = &new_index; + args.index_table = &index_table; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66693,7 +69468,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition_with_environme return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -66722,7 +69497,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_ iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_partition_with_environment_context") != 0) { + if (fname.compare("add_index") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66731,11 +69506,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_ using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_partition_with_environment_context_presult result; + ThriftHiveMetastore_add_index_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -66744,8 +69525,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_ sentry.commit(); throw result.o2; } - sentry.commit(); - return; + if 
(result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_index failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66755,23 +69540,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_ } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) +void ThriftHiveMetastoreConcurrentClient::alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) { - int32_t seqid = send_rename_partition(db_name, tbl_name, part_vals, new_part); - recv_rename_partition(seqid); + int32_t seqid = send_alter_index(dbname, base_tbl_name, idx_name, new_idx); + recv_alter_index(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("rename_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_index", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_rename_partition_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.new_part = &new_part; + ThriftHiveMetastore_alter_index_pargs args; + args.dbname = &dbname; + args.base_tbl_name = &base_tbl_name; + args.idx_name = &idx_name; + args.new_idx = &new_idx; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66782,7 +69567,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_rename_partition(const std::st return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) { int32_t rseqid = 0; @@ -66811,7 +69596,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t se iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("rename_partition") != 0) { + if (fname.compare("alter_index") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66820,7 +69605,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t se using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_rename_partition_presult result; + ThriftHiveMetastore_alter_index_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66844,21 +69629,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t se } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception) +bool ThriftHiveMetastoreConcurrentClient::drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const 
bool deleteData) { - int32_t seqid = send_partition_name_has_valid_characters(part_vals, throw_exception); - return recv_partition_name_has_valid_characters(seqid); + int32_t seqid = send_drop_index_by_name(db_name, tbl_name, index_name, deleteData); + return recv_drop_index_by_name(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("partition_name_has_valid_characters", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_partition_name_has_valid_characters_pargs args; - args.part_vals = &part_vals; - args.throw_exception = &throw_exception; + ThriftHiveMetastore_drop_index_by_name_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.index_name = &index_name; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66869,7 +69656,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_has_valid_chara return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characters(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t seqid) { int32_t rseqid = 0; @@ -66898,7 +69685,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characte iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("partition_name_has_valid_characters") != 0) { + if (fname.compare("drop_index_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66908,7 +69695,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characte throw TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - ThriftHiveMetastore_partition_name_has_valid_characters_presult result; + ThriftHiveMetastore_drop_index_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -66922,8 +69709,12 @@ bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characte sentry.commit(); throw result.o1; } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_has_valid_characters failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_index_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66933,21 +69724,22 @@ bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characte } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue) +void ThriftHiveMetastoreConcurrentClient::get_index_by_name(Index& _return, const std::string& db_name, const std::string& tbl_name, const std::string& index_name) { - int32_t seqid = send_get_config_value(name, 
defaultValue); - recv_get_config_value(_return, seqid); + int32_t seqid = send_get_index_by_name(db_name, tbl_name, index_name); + recv_get_index_by_name(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_config_value(const std::string& name, const std::string& defaultValue) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_config_value", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_config_value_pargs args; - args.name = &name; - args.defaultValue = &defaultValue; + ThriftHiveMetastore_get_index_by_name_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.index_name = &index_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66958,7 +69750,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_config_value(const std::st return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -66987,7 +69779,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_config_value") != 0) { + if (fname.compare("get_index_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66996,7 +69788,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_config_value_presult result; + ThriftHiveMetastore_get_index_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67011,8 +69803,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _re sentry.commit(); throw result.o1; } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_config_value failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67022,20 +69818,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _re } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::partition_name_to_vals(std::vector & _return, const std::string& part_name) +void ThriftHiveMetastoreConcurrentClient::get_indexes(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) { - int32_t seqid = send_partition_name_to_vals(part_name); - recv_partition_name_to_vals(_return, seqid); + int32_t seqid = send_get_indexes(db_name, tbl_name, max_indexes); + recv_get_indexes(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_vals(const std::string& part_name) +int32_t 
ThriftHiveMetastoreConcurrentClient::send_get_indexes(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("partition_name_to_vals", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_indexes", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_partition_name_to_vals_pargs args; - args.part_name = &part_name; + ThriftHiveMetastore_get_indexes_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.max_indexes = &max_indexes; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67046,7 +69844,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_vals(const s return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67075,7 +69873,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vecto iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("partition_name_to_vals") != 0) { + if (fname.compare("get_indexes") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67084,7 +69882,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vecto using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_partition_name_to_vals_presult result; + ThriftHiveMetastore_get_indexes_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67099,8 +69897,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vecto sentry.commit(); throw result.o1; } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_to_vals failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_indexes failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67110,20 +69912,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vecto } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::partition_name_to_spec(std::map & _return, const std::string& part_name) +void ThriftHiveMetastoreConcurrentClient::get_index_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) { - int32_t seqid = send_partition_name_to_spec(part_name); - recv_partition_name_to_spec(_return, seqid); + int32_t seqid = send_get_index_names(db_name, tbl_name, max_indexes); + recv_get_index_names(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_spec(const std::string& part_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("partition_name_to_spec", 
::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_index_names", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_partition_name_to_spec_pargs args; - args.part_name = &part_name; + ThriftHiveMetastore_get_index_names_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.max_indexes = &max_indexes; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67134,7 +69938,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_spec(const s return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::map<std::string, std::string> & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_index_names(std::vector<std::string> & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67163,7 +69967,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::map<s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("partition_name_to_spec") != 0) { + if (fname.compare("get_index_names") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67172,7 +69976,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::map<s using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_partition_name_to_spec_presult result; + ThriftHiveMetastore_get_index_names_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67183,12 +69987,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::map<s if (result.__isset.success) { // _return pointer has now been filled sentry.commit(); return; } - if (result.__isset.o1) { + if (result.__isset.o2) { sentry.commit(); - throw result.o1; + throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_to_spec failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_names failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67198,23 +70002,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::map<s } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map<std::string, std::string> & part_vals, const PartitionEventType::type eventType) +void ThriftHiveMetastoreConcurrentClient::get_primary_keys(PrimaryKeysResponse& _return, const PrimaryKeysRequest& request) { - int32_t seqid = send_markPartitionForEvent(db_name, tbl_name, part_vals, eventType); - recv_markPartitionForEvent(seqid); + int32_t seqid = send_get_primary_keys(request); + recv_get_primary_keys(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map<std::string, std::string> & part_vals, const PartitionEventType::type eventType) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_primary_keys(const PrimaryKeysRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("markPartitionForEvent", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_markPartitionForEvent_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.eventType = &eventType; + ThriftHiveMetastore_get_primary_keys_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67225,7 +70026,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_markPartitionForEvent(const st return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67254,7 +70055,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32 iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("markPartitionForEvent") != 0) { + if (fname.compare("get_primary_keys") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd();
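// Illustrative sketch, not part of the generated patch: the while(true) loop in each recv_*
// above is what lets several threads share one connection. getPending()/readMessageBegin()
// pull the next reply frame; if rseqid belongs to another thread, the frame is parked with
// updatePending() and waitForWork() releases the read mutex until the owning thread consumes
// it. So, assuming a connected `client` as sketched earlier (property and partition names are
// placeholders), concurrent calls from two threads are safe:
//
//   #include <thread>
//   std::thread t1([&client] {
//     std::string uris;
//     client.get_config_value(uris, "hive.metastore.uris", ""); // reply matched by its seqid
//   });
//   std::thread t2([&client] {
//     std::vector<std::string> vals;
//     client.partition_name_to_vals(vals, "ds=2008-04-08/hr=11"); // may complete first
//   });
//   t1.join();
//   t2.join(); // replies may arrive in either order; the seqids disambiguate them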
iprot_->getTransport()->readEnd(); @@ -67263,11 +70064,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32 using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_markPartitionForEvent_presult result; + ThriftHiveMetastore_get_primary_keys_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -67276,24 +70083,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32 sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } - if (result.__isset.o5) { - sentry.commit(); - throw result.o5; - } - if (result.__isset.o6) { - sentry.commit(); - throw result.o6; - } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_primary_keys failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67303,23 +70094,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32 } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) +void ThriftHiveMetastoreConcurrentClient::get_foreign_keys(ForeignKeysResponse& _return, const ForeignKeysRequest& request) { - int32_t seqid = send_isPartitionMarkedForEvent(db_name, tbl_name, part_vals, eventType); - return recv_isPartitionMarkedForEvent(seqid); + int32_t seqid = send_get_foreign_keys(request); + recv_get_foreign_keys(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_foreign_keys(const ForeignKeysRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("isPartitionMarkedForEvent", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_isPartitionMarkedForEvent_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.eventType = &eventType; + ThriftHiveMetastore_get_foreign_keys_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67330,7 +70118,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_isPartitionMarkedForEvent(cons return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(ForeignKeysResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67359,7 +70147,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const i iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("isPartitionMarkedForEvent") != 0) { + if 
(fname.compare("get_foreign_keys") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67368,16 +70156,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const i using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_isPartitionMarkedForEvent_presult result; + ThriftHiveMetastore_get_foreign_keys_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -67387,24 +70175,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const i sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } - if (result.__isset.o5) { - sentry.commit(); - throw result.o5; - } - if (result.__isset.o6) { - sentry.commit(); - throw result.o6; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "isPartitionMarkedForEvent failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_foreign_keys failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67414,21 +70186,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const i } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_index(Index& _return, const Index& new_index, const Table& index_table) +void ThriftHiveMetastoreConcurrentClient::get_unique_constraints(UniqueConstraintsResponse& _return, const UniqueConstraintsRequest& request) { - int32_t seqid = send_add_index(new_index, index_table); - recv_add_index(_return, seqid); + int32_t seqid = send_get_unique_constraints(request); + recv_get_unique_constraints(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_index(const Index& new_index, const Table& index_table) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_unique_constraints(const UniqueConstraintsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_index", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_unique_constraints", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_index_pargs args; - args.new_index = &new_index; - args.index_table = &index_table; + ThriftHiveMetastore_get_unique_constraints_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67439,7 +70210,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_index(const Index& new_ind return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_unique_constraints(UniqueConstraintsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67468,7 +70239,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const i iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_index") != 0) { + if 
(fname.compare("get_unique_constraints") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67477,7 +70248,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const i using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_index_presult result; + ThriftHiveMetastore_get_unique_constraints_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67496,12 +70267,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const i sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_index failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_unique_constraints failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67511,23 +70278,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const i } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) +void ThriftHiveMetastoreConcurrentClient::get_not_null_constraints(NotNullConstraintsResponse& _return, const NotNullConstraintsRequest& request) { - int32_t seqid = send_alter_index(dbname, base_tbl_name, idx_name, new_idx); - recv_alter_index(seqid); + int32_t seqid = send_get_not_null_constraints(request); + recv_get_not_null_constraints(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_not_null_constraints(const NotNullConstraintsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_index", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_not_null_constraints", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_index_pargs args; - args.dbname = &dbname; - args.base_tbl_name = &base_tbl_name; - args.idx_name = &idx_name; - args.new_idx = &new_idx; + ThriftHiveMetastore_get_not_null_constraints_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67538,7 +70302,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_index(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_not_null_constraints(NotNullConstraintsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67567,7 +70331,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_index") != 0) { + if (fname.compare("get_not_null_constraints") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67576,11 +70340,17 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_index_presult result; + ThriftHiveMetastore_get_not_null_constraints_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -67589,8 +70359,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) sentry.commit(); throw result.o2; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_not_null_constraints failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67600,23 +70370,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) +bool ThriftHiveMetastoreConcurrentClient::update_table_column_statistics(const ColumnStatistics& stats_obj) { - int32_t seqid = send_drop_index_by_name(db_name, tbl_name, index_name, deleteData); - return recv_drop_index_by_name(seqid); + int32_t seqid = send_update_table_column_statistics(stats_obj); + return recv_update_table_column_statistics(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) +int32_t ThriftHiveMetastoreConcurrentClient::send_update_table_column_statistics(const ColumnStatistics& stats_obj) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("update_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_index_by_name_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.index_name = &index_name; - args.deleteData = &deleteData; + ThriftHiveMetastore_update_table_column_statistics_pargs args; + args.stats_obj = &stats_obj; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67627,7 +70394,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_index_by_name(const std:: return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(const int32_t seqid) { int32_t rseqid = 0; @@ -67656,7 +70423,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_index_by_name") != 0) { + if (fname.compare("update_table_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67666,7 +70433,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t throw TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - 
ThriftHiveMetastore_drop_index_by_name_presult result; + ThriftHiveMetastore_update_table_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67684,8 +70451,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_index_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_table_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67695,22 +70470,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_index_by_name(Index& _return, const std::string& db_name, const std::string& tbl_name, const std::string& index_name) +bool ThriftHiveMetastoreConcurrentClient::update_partition_column_statistics(const ColumnStatistics& stats_obj) { - int32_t seqid = send_get_index_by_name(db_name, tbl_name, index_name); - recv_get_index_by_name(_return, seqid); + int32_t seqid = send_update_partition_column_statistics(stats_obj); + return recv_update_partition_column_statistics(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_update_partition_column_statistics(const ColumnStatistics& stats_obj) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("update_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_index_by_name_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.index_name = &index_name; + ThriftHiveMetastore_update_partition_column_statistics_pargs args; + args.stats_obj = &stats_obj; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67721,7 +70494,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_by_name(const std::s return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistics(const int32_t seqid) { int32_t rseqid = 0; @@ -67750,7 +70523,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_index_by_name") != 0) { + if (fname.compare("update_partition_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67759,16 +70532,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_index_by_name_presult result; + bool _return; + ThriftHiveMetastore_update_partition_column_statistics_presult result; 
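// Illustrative sketch, not part of the generated patch: each recv_* above ends in the same
// dispatch over the deserialized *_presult. result.success points at the caller's _return
// (or at a local for bool/primitive returns), so a set success flag means the value is
// already in place; each oN flag corresponds to one exception declared for the method in the
// Thrift IDL; and when nothing is set, the sentry is deliberately not committed before
// MISSING_RESULT is thrown. Condensed shape of the non-void case:
//
//   result.success = &_return; // deserialize straight into the caller's storage
//   result.read(iprot_);
//   if (result.__isset.success) { sentry.commit(); return; }          // value filled
//   if (result.__isset.o1) { sentry.commit(); throw result.o1; }      // e.g. NoSuchObjectException
//   if (result.__isset.o2) { sentry.commit(); throw result.o2; }      // e.g. MetaException
//   // in a bad state, don't commit
//   throw TApplicationException(TApplicationException::MISSING_RESULT, "unknown result");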
result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -67778,8 +70551,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_partition_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67789,22 +70570,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_indexes(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) +void ThriftHiveMetastoreConcurrentClient::get_table_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& col_name) { - int32_t seqid = send_get_indexes(db_name, tbl_name, max_indexes); - recv_get_indexes(_return, seqid); + int32_t seqid = send_get_table_column_statistics(db_name, tbl_name, col_name); + recv_get_table_column_statistics(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_indexes(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_indexes", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_indexes_pargs args; + ThriftHiveMetastore_get_table_column_statistics_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.max_indexes = &max_indexes; + args.col_name = &col_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67815,7 +70596,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_indexes(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(ColumnStatistics& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67844,7 +70625,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_indexes") != 0) { + if (fname.compare("get_table_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67853,7 +70634,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - 
ThriftHiveMetastore_get_indexes_presult result; + ThriftHiveMetastore_get_table_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67872,98 +70653,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_indexes failed: unknown result"); - } - // seqid != rseqid - this->sync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -void ThriftHiveMetastoreConcurrentClient::get_index_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) -{ - int32_t seqid = send_get_index_names(db_name, tbl_name, max_indexes); - recv_get_index_names(_return, seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_index_names", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_get_index_names_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.max_indexes = &max_indexes; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -void ThriftHiveMetastoreConcurrentClient::recv_get_index_names(std::vector & _return, const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_index_names") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); - } - ThriftHiveMetastore_get_index_names_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o3) { sentry.commit(); - return; + throw result.o3; } - if (result.__isset.o2) { + if (result.__isset.o4) { sentry.commit(); - throw result.o2; + throw result.o4; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, 
"get_index_names failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67973,20 +70672,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_names(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_primary_keys_pargs args; - args.request = &request; + ThriftHiveMetastore_get_partition_column_statistics_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_name = &part_name; + args.col_name = &col_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67997,7 +70699,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_primary_keys(const Primary return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(ColumnStatistics& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68026,7 +70728,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysRespo iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_primary_keys") != 0) { + if (fname.compare("get_partition_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68035,7 +70737,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysRespo using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_primary_keys_presult result; + ThriftHiveMetastore_get_partition_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68054,8 +70756,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysRespo sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_primary_keys failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68065,19 +70775,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysRespo } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_foreign_keys(ForeignKeysResponse& _return, const ForeignKeysRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_table_statistics_req(TableStatsResult& _return, const TableStatsRequest& request) { - int32_t seqid = send_get_foreign_keys(request); - recv_get_foreign_keys(_return, seqid); + int32_t seqid = send_get_table_statistics_req(request); + recv_get_table_statistics_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_foreign_keys(const ForeignKeysRequest& 
request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_statistics_req(const TableStatsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_foreign_keys_pargs args; + ThriftHiveMetastore_get_table_statistics_req_pargs args; args.request = &request; args.write(oprot_); @@ -68089,7 +70799,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_foreign_keys(const Foreign return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(ForeignKeysResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableStatsResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68118,7 +70828,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(ForeignKeysRespo iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_foreign_keys") != 0) { + if (fname.compare("get_table_statistics_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68127,7 +70837,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(ForeignKeysRespo using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_foreign_keys_presult result; + ThriftHiveMetastore_get_table_statistics_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68147,7 +70857,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(ForeignKeysRespo throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_foreign_keys failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_statistics_req failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68157,19 +70867,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(ForeignKeysRespo } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_unique_constraints(UniqueConstraintsResponse& _return, const UniqueConstraintsRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_partitions_statistics_req(PartitionsStatsResult& _return, const PartitionsStatsRequest& request) { - int32_t seqid = send_get_unique_constraints(request); - recv_get_unique_constraints(_return, seqid); + int32_t seqid = send_get_partitions_statistics_req(request); + recv_get_partitions_statistics_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_unique_constraints(const UniqueConstraintsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_statistics_req(const PartitionsStatsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_unique_constraints", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_unique_constraints_pargs args; + 
ThriftHiveMetastore_get_partitions_statistics_req_pargs args; args.request = &request; args.write(oprot_); @@ -68181,7 +70891,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_unique_constraints(const U return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_unique_constraints(UniqueConstraintsResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(PartitionsStatsResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68210,7 +70920,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_unique_constraints(UniqueCons iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_unique_constraints") != 0) { + if (fname.compare("get_partitions_statistics_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68219,7 +70929,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_unique_constraints(UniqueCons using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_unique_constraints_presult result; + ThriftHiveMetastore_get_partitions_statistics_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68239,7 +70949,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_unique_constraints(UniqueCons throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_unique_constraints failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_statistics_req failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68249,19 +70959,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_unique_constraints(UniqueCons } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_not_null_constraints(NotNullConstraintsResponse& _return, const NotNullConstraintsRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_aggr_stats_for(AggrStats& _return, const PartitionsStatsRequest& request) { - int32_t seqid = send_get_not_null_constraints(request); - recv_get_not_null_constraints(_return, seqid); + int32_t seqid = send_get_aggr_stats_for(request); + recv_get_aggr_stats_for(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_not_null_constraints(const NotNullConstraintsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_aggr_stats_for(const PartitionsStatsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_not_null_constraints", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_not_null_constraints_pargs args; + ThriftHiveMetastore_get_aggr_stats_for_pargs args; args.request = &request; args.write(oprot_); @@ -68273,7 +70983,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_not_null_constraints(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_not_null_constraints(NotNullConstraintsResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _return, const int32_t seqid) { int32_t rseqid = 
0; @@ -68302,7 +71012,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_not_null_constraints(NotNullC iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_not_null_constraints") != 0) { + if (fname.compare("get_aggr_stats_for") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68311,7 +71021,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_not_null_constraints(NotNullC using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_not_null_constraints_presult result; + ThriftHiveMetastore_get_aggr_stats_for_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68331,7 +71041,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_not_null_constraints(NotNullC throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_not_null_constraints failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_aggr_stats_for failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68341,20 +71051,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_not_null_constraints(NotNullC } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::update_table_column_statistics(const ColumnStatistics& stats_obj) +bool ThriftHiveMetastoreConcurrentClient::set_aggr_stats_for(const SetPartitionsStatsRequest& request) { - int32_t seqid = send_update_table_column_statistics(stats_obj); - return recv_update_table_column_statistics(seqid); + int32_t seqid = send_set_aggr_stats_for(request); + return recv_set_aggr_stats_for(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_update_table_column_statistics(const ColumnStatistics& stats_obj) +int32_t ThriftHiveMetastoreConcurrentClient::send_set_aggr_stats_for(const SetPartitionsStatsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("update_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("set_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_update_table_column_statistics_pargs args; - args.stats_obj = &stats_obj; + ThriftHiveMetastore_set_aggr_stats_for_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68365,7 +71075,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_update_table_column_statistics return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const int32_t seqid) { int32_t rseqid = 0; @@ -68394,7 +71104,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(co iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("update_table_column_statistics") != 0) { + if (fname.compare("set_aggr_stats_for") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68404,7 +71114,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(co throw 
TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - ThriftHiveMetastore_update_table_column_statistics_presult result; + ThriftHiveMetastore_set_aggr_stats_for_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68431,7 +71141,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(co throw result.o4; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_table_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_aggr_stats_for failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68441,20 +71151,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(co } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::update_partition_column_statistics(const ColumnStatistics& stats_obj) +bool ThriftHiveMetastoreConcurrentClient::delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) { - int32_t seqid = send_update_partition_column_statistics(stats_obj); - return recv_update_partition_column_statistics(seqid); + int32_t seqid = send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name); + return recv_delete_partition_column_statistics(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_update_partition_column_statistics(const ColumnStatistics& stats_obj) +int32_t ThriftHiveMetastoreConcurrentClient::send_delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("update_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("delete_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_update_partition_column_statistics_pargs args; - args.stats_obj = &stats_obj; + ThriftHiveMetastore_delete_partition_column_statistics_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_name = &part_name; + args.col_name = &col_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68465,7 +71178,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_update_partition_column_statis return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistics(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistics(const int32_t seqid) { int32_t rseqid = 0; @@ -68494,7 +71207,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistic iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("update_partition_column_statistics") != 0) { + if (fname.compare("delete_partition_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68504,7 +71217,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistic throw TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - ThriftHiveMetastore_update_partition_column_statistics_presult result; + 
ThriftHiveMetastore_delete_partition_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68531,7 +71244,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistic throw result.o4; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_partition_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_partition_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68541,19 +71254,19 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistic } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +bool ThriftHiveMetastoreConcurrentClient::delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) { - int32_t seqid = send_get_table_column_statistics(db_name, tbl_name, col_name); - recv_get_table_column_statistics(_return, seqid); + int32_t seqid = send_delete_table_column_statistics(db_name, tbl_name, col_name); + return recv_delete_table_column_statistics(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("delete_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_column_statistics_pargs args; + ThriftHiveMetastore_delete_table_column_statistics_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; args.col_name = &col_name; @@ -68567,7 +71280,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_column_statistics(co return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(ColumnStatistics& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(const int32_t seqid) { int32_t rseqid = 0; @@ -68596,7 +71309,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(Colum iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_column_statistics") != 0) { + if (fname.compare("delete_table_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68605,16 +71318,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(Colum using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_column_statistics_presult result; + bool _return; + ThriftHiveMetastore_delete_table_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); 
iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -68633,7 +71346,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(Colum throw result.o4; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_table_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68643,23 +71356,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(Colum } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partition_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) +void ThriftHiveMetastoreConcurrentClient::create_function(const Function& func) { - int32_t seqid = send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name); - recv_get_partition_column_statistics(_return, seqid); + int32_t seqid = send_create_function(func); + recv_create_function(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_function(const Function& func) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_function", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_column_statistics_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.col_name = &col_name; + ThriftHiveMetastore_create_function_pargs args; + args.func = &func; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68670,7 +71380,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_column_statistic return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(ColumnStatistics& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_create_function(const int32_t seqid) { int32_t rseqid = 0; @@ -68699,7 +71409,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(C iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_column_statistics") != 0) { + if (fname.compare("create_function") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68708,17 +71418,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(C using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partition_column_statistics_presult result; - result.success = &_return; + ThriftHiveMetastore_create_function_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // 
_return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -68735,8 +71439,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(C sentry.commit(); throw result.o4; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_column_statistics failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68746,20 +71450,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(C } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_statistics_req(TableStatsResult& _return, const TableStatsRequest& request) +void ThriftHiveMetastoreConcurrentClient::drop_function(const std::string& dbName, const std::string& funcName) { - int32_t seqid = send_get_table_statistics_req(request); - recv_get_table_statistics_req(_return, seqid); + int32_t seqid = send_drop_function(dbName, funcName); + recv_drop_function(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_statistics_req(const TableStatsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_function(const std::string& dbName, const std::string& funcName) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_function", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_statistics_req_pargs args; - args.request = &request; + ThriftHiveMetastore_drop_function_pargs args; + args.dbName = &dbName; + args.funcName = &funcName; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68770,7 +71475,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_statistics_req(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableStatsResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_drop_function(const int32_t seqid) { int32_t rseqid = 0; @@ -68799,7 +71504,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableSta iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_statistics_req") != 0) { + if (fname.compare("drop_function") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68808,17 +71513,99 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableSta using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_statistics_req_presult result; - result.success = &_return; + ThriftHiveMetastore_drop_function_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o1) { sentry.commit(); - return; + throw result.o1; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) 
+}
+
+void ThriftHiveMetastoreConcurrentClient::alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc)
+{
+  int32_t seqid = send_alter_function(dbName, funcName, newFunc);
+  recv_alter_function(seqid);
+}
+
+int32_t ThriftHiveMetastoreConcurrentClient::send_alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc)
+{
+  int32_t cseqid = this->sync_.generateSeqId();
+  ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
+  oprot_->writeMessageBegin("alter_function", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  ThriftHiveMetastore_alter_function_pargs args;
+  args.dbName = &dbName;
+  args.funcName = &funcName;
+  args.newFunc = &newFunc;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+
+  sentry.commit();
+  return cseqid;
+}
+
+void ThriftHiveMetastoreConcurrentClient::recv_alter_function(const int32_t seqid)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  // the read mutex gets dropped and reacquired as part of waitForWork()
+  // The destructor of this sentry wakes up other clients
+  ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);
+
+  while(true) {
+    if(!this->sync_.getPending(fname, mtype, rseqid)) {
+      iprot_->readMessageBegin(fname, mtype, rseqid);
+    }
+    if(seqid == rseqid) {
+      if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+        ::apache::thrift::TApplicationException x;
+        x.read(iprot_);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+        sentry.commit();
+        throw x;
+      }
+      if (mtype != ::apache::thrift::protocol::T_REPLY) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+      }
+      if (fname.compare("alter_function") != 0) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+
+        // in a bad state, don't commit
+        using ::apache::thrift::protocol::TProtocolException;
+        throw TProtocolException(TProtocolException::INVALID_DATA);
+      }
+      ThriftHiveMetastore_alter_function_presult result;
+      result.read(iprot_);
+      iprot_->readMessageEnd();
+      iprot_->getTransport()->readEnd();
+
       if (result.__isset.o1) {
         sentry.commit();
         throw result.o1;
@@ -68827,8 +71614,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableSta
         sentry.commit();
         throw result.o2;
       }
-      // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_statistics_req failed: unknown result");
+      sentry.commit();
+      return;
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -68838,20 +71625,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableSta
 } // end while(true)
 }
 
-void ThriftHiveMetastoreConcurrentClient::get_partitions_statistics_req(PartitionsStatsResult& _return, const PartitionsStatsRequest& request)
+void ThriftHiveMetastoreConcurrentClient::get_functions(std::vector<std::string> & _return, const std::string& dbName, const std::string& pattern)
 {
-  int32_t seqid = send_get_partitions_statistics_req(request);
-  recv_get_partitions_statistics_req(_return, seqid);
+  int32_t seqid = send_get_functions(dbName, pattern);
+  recv_get_functions(_return, seqid);
 }
 
-int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_statistics_req(const PartitionsStatsRequest& request)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_functions(const std::string& dbName, const std::string& pattern) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_functions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_statistics_req_pargs args; - args.request = &request; + ThriftHiveMetastore_get_functions_pargs args; + args.dbName = &dbName; + args.pattern = &pattern; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68862,7 +71650,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_statistics_req( return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(PartitionsStatsResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68891,7 +71679,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(Par iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_statistics_req") != 0) { + if (fname.compare("get_functions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68900,7 +71688,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(Par using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partitions_statistics_req_presult result; + ThriftHiveMetastore_get_functions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68915,12 +71703,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(Par sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_statistics_req failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_functions failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68930,20 +71714,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(Par } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_aggr_stats_for(AggrStats& _return, const PartitionsStatsRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_function(Function& _return, const std::string& dbName, const std::string& funcName) { - int32_t seqid = send_get_aggr_stats_for(request); - recv_get_aggr_stats_for(_return, seqid); + int32_t seqid = send_get_function(dbName, funcName); + recv_get_function(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_aggr_stats_for(const PartitionsStatsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_function(const std::string& dbName, const std::string& funcName) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_function", 
::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_aggr_stats_for_pargs args; - args.request = &request; + ThriftHiveMetastore_get_function_pargs args; + args.dbName = &dbName; + args.funcName = &funcName; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68954,7 +71739,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_aggr_stats_for(const Parti return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68983,7 +71768,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_aggr_stats_for") != 0) { + if (fname.compare("get_function") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68992,7 +71777,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_aggr_stats_for_presult result; + ThriftHiveMetastore_get_function_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -69012,7 +71797,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _re throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_aggr_stats_for failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_function failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69022,20 +71807,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _re } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::set_aggr_stats_for(const SetPartitionsStatsRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_all_functions(GetAllFunctionsResponse& _return) { - int32_t seqid = send_set_aggr_stats_for(request); - return recv_set_aggr_stats_for(seqid); + int32_t seqid = send_get_all_functions(); + recv_get_all_functions(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_set_aggr_stats_for(const SetPartitionsStatsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_functions() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("set_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_functions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_set_aggr_stats_for_pargs args; - args.request = &request; + ThriftHiveMetastore_get_all_functions_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69046,7 +71830,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_set_aggr_stats_for(const SetPa return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctionsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -69075,7 +71859,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const 
int32_t iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("set_aggr_stats_for") != 0) { + if (fname.compare("get_all_functions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69084,35 +71868,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const int32_t using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_set_aggr_stats_for_presult result; + ThriftHiveMetastore_get_all_functions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_aggr_stats_for failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_functions failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69122,23 +71894,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const int32_t } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) +bool ThriftHiveMetastoreConcurrentClient::create_role(const Role& role) { - int32_t seqid = send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name); - return recv_delete_partition_column_statistics(seqid); + int32_t seqid = send_create_role(role); + return recv_create_role(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_role(const Role& role) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("delete_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_delete_partition_column_statistics_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.col_name = &col_name; + ThriftHiveMetastore_create_role_pargs args; + args.role = &role; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69149,7 +71918,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_delete_partition_column_statis return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistics(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_create_role(const int32_t seqid) { int32_t rseqid = 0; @@ -69178,7 +71947,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistic iprot_->readMessageEnd(); 
iprot_->getTransport()->readEnd(); } - if (fname.compare("delete_partition_column_statistics") != 0) { + if (fname.compare("create_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69188,7 +71957,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistic throw TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - ThriftHiveMetastore_delete_partition_column_statistics_presult result; + ThriftHiveMetastore_create_role_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -69202,20 +71971,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistic sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_partition_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69225,22 +71982,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistic } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +bool ThriftHiveMetastoreConcurrentClient::drop_role(const std::string& role_name) { - int32_t seqid = send_delete_table_column_statistics(db_name, tbl_name, col_name); - return recv_delete_table_column_statistics(seqid); + int32_t seqid = send_drop_role(role_name); + return recv_drop_role(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_role(const std::string& role_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("delete_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_delete_table_column_statistics_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.col_name = &col_name; + ThriftHiveMetastore_drop_role_pargs args; + args.role_name = &role_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69251,7 +72006,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_delete_table_column_statistics return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_role(const int32_t seqid) { int32_t rseqid = 0; @@ -69280,7 +72035,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(co iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("delete_table_column_statistics") != 0) { + if (fname.compare("drop_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69290,7 
+72045,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(co throw TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - ThriftHiveMetastore_delete_table_column_statistics_presult result; + ThriftHiveMetastore_drop_role_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -69304,20 +72059,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(co sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_table_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69327,20 +72070,19 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(co } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::create_function(const Function& func) +void ThriftHiveMetastoreConcurrentClient::get_role_names(std::vector & _return) { - int32_t seqid = send_create_function(func); - recv_create_function(seqid); + int32_t seqid = send_get_role_names(); + recv_get_role_names(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_function(const Function& func) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_names() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_function", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_role_names", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_function_pargs args; - args.func = &func; + ThriftHiveMetastore_get_role_names_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69351,7 +72093,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_function(const Function return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_create_function(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -69380,7 +72122,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_function(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_function") != 0) { + if (fname.compare("get_role_names") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69389,29 +72131,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_function(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_create_function_presult result; + ThriftHiveMetastore_get_role_names_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { + if (result.__isset.success) { + // _return 
pointer has now been filled sentry.commit(); - throw result.o3; + return; } - if (result.__isset.o4) { + if (result.__isset.o1) { sentry.commit(); - throw result.o4; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_role_names failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69421,21 +72157,25 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_function(const int32_t seq } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_function(const std::string& dbName, const std::string& funcName) +bool ThriftHiveMetastoreConcurrentClient::grant_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type, const std::string& grantor, const PrincipalType::type grantorType, const bool grant_option) { - int32_t seqid = send_drop_function(dbName, funcName); - recv_drop_function(seqid); + int32_t seqid = send_grant_role(role_name, principal_name, principal_type, grantor, grantorType, grant_option); + return recv_grant_role(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_function(const std::string& dbName, const std::string& funcName) +int32_t ThriftHiveMetastoreConcurrentClient::send_grant_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type, const std::string& grantor, const PrincipalType::type grantorType, const bool grant_option) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_function", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_function_pargs args; - args.dbName = &dbName; - args.funcName = &funcName; + ThriftHiveMetastore_grant_role_pargs args; + args.role_name = &role_name; + args.principal_name = &principal_name; + args.principal_type = &principal_type; + args.grantor = &grantor; + args.grantorType = &grantorType; + args.grant_option = &grant_option; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69446,7 +72186,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_function(const std::strin return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_function(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_grant_role(const int32_t seqid) { int32_t rseqid = 0; @@ -69475,7 +72215,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_function(const int32_t seqid iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_function") != 0) { + if (fname.compare("grant_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69484,21 +72224,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_function(const int32_t seqid using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_function_presult result; + bool _return; + ThriftHiveMetastore_grant_role_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { sentry.commit(); - throw result.o1; + return _return; } - if (result.__isset.o3) { + if 
(result.__isset.o1) { sentry.commit(); - throw result.o3; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69508,22 +72250,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_function(const int32_t seqid } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) +bool ThriftHiveMetastoreConcurrentClient::revoke_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type) { - int32_t seqid = send_alter_function(dbName, funcName, newFunc); - recv_alter_function(seqid); + int32_t seqid = send_revoke_role(role_name, principal_name, principal_type); + return recv_revoke_role(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) +int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_function", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_function_pargs args; - args.dbName = &dbName; - args.funcName = &funcName; - args.newFunc = &newFunc; + ThriftHiveMetastore_revoke_role_pargs args; + args.role_name = &role_name; + args.principal_name = &principal_name; + args.principal_type = &principal_type; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69534,7 +72276,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_function(const std::stri return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_function(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) { int32_t rseqid = 0; @@ -69563,7 +72305,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_function(const int32_t seqi iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_function") != 0) { + if (fname.compare("revoke_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69572,21 +72314,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_function(const int32_t seqi using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_function_presult result; + bool _return; + ThriftHiveMetastore_revoke_role_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { sentry.commit(); - throw result.o1; + return _return; } - if (result.__isset.o2) { + if (result.__isset.o1) { sentry.commit(); - throw result.o2; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_role failed: unknown result"); } // seqid != rseqid 
this->sync_.updatePending(fname, mtype, rseqid); @@ -69596,21 +72340,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_function(const int32_t seqi } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_functions(std::vector & _return, const std::string& dbName, const std::string& pattern) +void ThriftHiveMetastoreConcurrentClient::list_roles(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type) { - int32_t seqid = send_get_functions(dbName, pattern); - recv_get_functions(_return, seqid); + int32_t seqid = send_list_roles(principal_name, principal_type); + recv_list_roles(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_functions(const std::string& dbName, const std::string& pattern) +int32_t ThriftHiveMetastoreConcurrentClient::send_list_roles(const std::string& principal_name, const PrincipalType::type principal_type) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_functions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("list_roles", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_functions_pargs args; - args.dbName = &dbName; - args.pattern = &pattern; + ThriftHiveMetastore_list_roles_pargs args; + args.principal_name = &principal_name; + args.principal_type = &principal_type; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69621,7 +72365,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_functions(const std::strin return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -69650,7 +72394,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_functions") != 0) { + if (fname.compare("list_roles") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69659,7 +72403,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vectorreadMessageEnd(); @@ -69675,7 +72419,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -69685,21 +72429,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_function", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_function_pargs args; - args.dbName = &dbName; - args.funcName = &funcName; + ThriftHiveMetastore_grant_revoke_role_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69710,7 +72453,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_function(const std::string return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRoleResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -69739,7 +72482,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, c iprot_->readMessageEnd(); 
iprot_->getTransport()->readEnd(); } - if (fname.compare("get_function") != 0) { + if (fname.compare("grant_revoke_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69748,7 +72491,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, c using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_function_presult result; + ThriftHiveMetastore_grant_revoke_role_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -69763,12 +72506,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, c sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_function failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69778,19 +72517,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, c } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_all_functions(GetAllFunctionsResponse& _return) +void ThriftHiveMetastoreConcurrentClient::get_principals_in_role(GetPrincipalsInRoleResponse& _return, const GetPrincipalsInRoleRequest& request) { - int32_t seqid = send_get_all_functions(); - recv_get_all_functions(_return, seqid); + int32_t seqid = send_get_principals_in_role(request); + recv_get_principals_in_role(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_functions() +int32_t ThriftHiveMetastoreConcurrentClient::send_get_principals_in_role(const GetPrincipalsInRoleRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_all_functions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_principals_in_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_functions_pargs args; + ThriftHiveMetastore_get_principals_in_role_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69801,7 +72541,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_functions() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctionsResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincipalsInRoleResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -69830,7 +72570,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctions iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_functions") != 0) { + if (fname.compare("get_principals_in_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69839,7 +72579,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctions using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_all_functions_presult result; + 
ThriftHiveMetastore_get_principals_in_role_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -69855,95 +72595,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctions throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_functions failed: unknown result"); - } - // seqid != rseqid - this->sync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -bool ThriftHiveMetastoreConcurrentClient::create_role(const Role& role) -{ - int32_t seqid = send_create_role(role); - return recv_create_role(seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_create_role(const Role& role) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_role", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_create_role_pargs args; - args.role = &role; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -bool ThriftHiveMetastoreConcurrentClient::recv_create_role(const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("create_role") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); - } - bool _return; - ThriftHiveMetastore_create_role_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - sentry.commit(); - return _return; - } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_principals_in_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69953,20 +72605,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_role(const int32_t seqid) } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_role(const std::string& role_name) 
+void ThriftHiveMetastoreConcurrentClient::get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const GetRoleGrantsForPrincipalRequest& request) { - int32_t seqid = send_drop_role(role_name); - return recv_drop_role(seqid); + int32_t seqid = send_get_role_grants_for_principal(request); + recv_get_role_grants_for_principal(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_role(const std::string& role_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_grants_for_principal(const GetRoleGrantsForPrincipalRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_role_grants_for_principal", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_role_pargs args; - args.role_name = &role_name; + ThriftHiveMetastore_get_role_grants_for_principal_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -69977,7 +72629,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_role(const std::string& r return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_role(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -70006,7 +72658,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_role(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_role") != 0) { + if (fname.compare("get_role_grants_for_principal") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70015,23 +72667,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_role(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_role_presult result; + ThriftHiveMetastore_get_role_grants_for_principal_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_role_grants_for_principal failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70041,19 +72693,22 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_role(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_role_names(std::vector & _return) +void ThriftHiveMetastoreConcurrentClient::get_privilege_set(PrincipalPrivilegeSet& _return, const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector & group_names) { - int32_t seqid = send_get_role_names(); - recv_get_role_names(_return, seqid); + int32_t seqid = send_get_privilege_set(hiveObject, user_name, group_names); + recv_get_privilege_set(_return, seqid); } -int32_t 
ThriftHiveMetastoreConcurrentClient::send_get_role_names() +int32_t ThriftHiveMetastoreConcurrentClient::send_get_privilege_set(const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector & group_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_role_names", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_privilege_set", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_role_names_pargs args; + ThriftHiveMetastore_get_privilege_set_pargs args; + args.hiveObject = &hiveObject; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70064,7 +72719,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_names() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivilegeSet& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -70093,7 +72748,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_role_names") != 0) { + if (fname.compare("get_privilege_set") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70102,7 +72757,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vectorreadMessageEnd(); @@ -70118,7 +72773,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -70128,25 +72783,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) { - int32_t seqid = send_grant_role(role_name, principal_name, principal_type, grantor, grantorType, grant_option); - return recv_grant_role(seqid); + int32_t seqid = send_list_privileges(principal_name, principal_type, hiveObject); + recv_list_privileges(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_grant_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type, const std::string& grantor, const PrincipalType::type grantorType, const bool grant_option) +int32_t ThriftHiveMetastoreConcurrentClient::send_list_privileges(const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("grant_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("list_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_grant_role_pargs args; - args.role_name = &role_name; + ThriftHiveMetastore_list_privileges_pargs args; args.principal_name = &principal_name; args.principal_type = &principal_type; - args.grantor = &grantor; - args.grantorType = &grantorType; - args.grant_option = &grant_option; + args.hiveObject = &hiveObject; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70157,7 +72809,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_grant_role(const std::string& return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_grant_role(const int32_t 
seqid) +void ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -70186,7 +72838,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_role(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("grant_role") != 0) { + if (fname.compare("list_privileges") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70195,23 +72847,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_role(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_grant_role_presult result; + ThriftHiveMetastore_list_privileges_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "list_privileges failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70221,22 +72873,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_role(const int32_t seqid) } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::revoke_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type) +bool ThriftHiveMetastoreConcurrentClient::grant_privileges(const PrivilegeBag& privileges) { - int32_t seqid = send_revoke_role(role_name, principal_name, principal_type); - return recv_revoke_role(seqid); + int32_t seqid = send_grant_privileges(privileges); + return recv_grant_privileges(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type) +int32_t ThriftHiveMetastoreConcurrentClient::send_grant_privileges(const PrivilegeBag& privileges) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_revoke_role_pargs args; - args.role_name = &role_name; - args.principal_name = &principal_name; - args.principal_type = &principal_type; + ThriftHiveMetastore_grant_privileges_pargs args; + args.privileges = &privileges; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70247,7 +72897,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_role(const std::string& return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t seqid) { int32_t rseqid = 0; @@ -70276,7 +72926,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("revoke_role") != 0) { + if (fname.compare("grant_privileges") != 0) { 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70286,7 +72936,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) throw TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - ThriftHiveMetastore_revoke_role_presult result; + ThriftHiveMetastore_grant_privileges_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -70301,7 +72951,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_privileges failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70311,21 +72961,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::list_roles(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type) +bool ThriftHiveMetastoreConcurrentClient::revoke_privileges(const PrivilegeBag& privileges) { - int32_t seqid = send_list_roles(principal_name, principal_type); - recv_list_roles(_return, seqid); + int32_t seqid = send_revoke_privileges(privileges); + return recv_revoke_privileges(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_list_roles(const std::string& principal_name, const PrincipalType::type principal_type) +int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_privileges(const PrivilegeBag& privileges) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("list_roles", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("revoke_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_list_roles_pargs args; - args.principal_name = &principal_name; - args.principal_type = &principal_type; + ThriftHiveMetastore_revoke_privileges_pargs args; + args.privileges = &privileges; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70336,7 +72985,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_list_roles(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t seqid) { int32_t rseqid = 0; @@ -70365,7 +73014,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _r iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("list_roles") != 0) { + if (fname.compare("revoke_privileges") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70374,23 +73023,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _r using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_list_roles_presult result; + bool _return; + ThriftHiveMetastore_revoke_privileges_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return 
pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "list_roles failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_privileges failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70400,19 +73049,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _r } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::grant_revoke_role(GrantRevokeRoleResponse& _return, const GrantRevokeRoleRequest& request) +void ThriftHiveMetastoreConcurrentClient::grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const GrantRevokePrivilegeRequest& request) { - int32_t seqid = send_grant_revoke_role(request); - recv_grant_revoke_role(_return, seqid); + int32_t seqid = send_grant_revoke_privileges(request); + recv_grant_revoke_privileges(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_grant_revoke_role(const GrantRevokeRoleRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_grant_revoke_privileges(const GrantRevokePrivilegeRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("grant_revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_revoke_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_grant_revoke_role_pargs args; + ThriftHiveMetastore_grant_revoke_privileges_pargs args; args.request = &request; args.write(oprot_); @@ -70424,7 +73073,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_grant_revoke_role(const GrantR return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRoleResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -70453,7 +73102,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRole iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("grant_revoke_role") != 0) { + if (fname.compare("grant_revoke_privileges") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70462,7 +73111,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRole using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_grant_revoke_role_presult result; + ThriftHiveMetastore_grant_revoke_privileges_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -70478,7 +73127,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRole throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_privileges failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, 
rseqid); @@ -70488,20 +73137,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRole } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_principals_in_role(GetPrincipalsInRoleResponse& _return, const GetPrincipalsInRoleRequest& request) +void ThriftHiveMetastoreConcurrentClient::set_ugi(std::vector & _return, const std::string& user_name, const std::vector & group_names) { - int32_t seqid = send_get_principals_in_role(request); - recv_get_principals_in_role(_return, seqid); + int32_t seqid = send_set_ugi(user_name, group_names); + recv_set_ugi(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_principals_in_role(const GetPrincipalsInRoleRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_set_ugi(const std::string& user_name, const std::vector & group_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_principals_in_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("set_ugi", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_principals_in_role_pargs args; - args.request = &request; + ThriftHiveMetastore_set_ugi_pargs args; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70512,7 +73162,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_principals_in_role(const G return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincipalsInRoleResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -70541,7 +73191,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincip iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_principals_in_role") != 0) { + if (fname.compare("set_ugi") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70550,7 +73200,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincip using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_principals_in_role_presult result; + ThriftHiveMetastore_set_ugi_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -70566,7 +73216,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincip throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_principals_in_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_ugi failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70576,20 +73226,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincip } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const GetRoleGrantsForPrincipalRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_delegation_token(std::string& _return, const std::string& token_owner, const std::string& renewer_kerberos_principal_name) { - int32_t seqid = 
send_get_role_grants_for_principal(request); - recv_get_role_grants_for_principal(_return, seqid); + int32_t seqid = send_get_delegation_token(token_owner, renewer_kerberos_principal_name); + recv_get_delegation_token(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_grants_for_principal(const GetRoleGrantsForPrincipalRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_delegation_token(const std::string& token_owner, const std::string& renewer_kerberos_principal_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_role_grants_for_principal", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_role_grants_for_principal_pargs args; - args.request = &request; + ThriftHiveMetastore_get_delegation_token_pargs args; + args.token_owner = &token_owner; + args.renewer_kerberos_principal_name = &renewer_kerberos_principal_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70600,7 +73251,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_grants_for_principal( return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -70629,7 +73280,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(Get iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_role_grants_for_principal") != 0) { + if (fname.compare("get_delegation_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70638,7 +73289,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(Get using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_role_grants_for_principal_presult result; + ThriftHiveMetastore_get_delegation_token_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -70654,7 +73305,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(Get throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_role_grants_for_principal failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_delegation_token failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70664,22 +73315,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(Get } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_privilege_set(PrincipalPrivilegeSet& _return, const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector & group_names) +int64_t ThriftHiveMetastoreConcurrentClient::renew_delegation_token(const std::string& token_str_form) { - int32_t seqid = send_get_privilege_set(hiveObject, user_name, group_names); - recv_get_privilege_set(_return, seqid); + int32_t seqid = send_renew_delegation_token(token_str_form); + return recv_renew_delegation_token(seqid); 
} -int32_t ThriftHiveMetastoreConcurrentClient::send_get_privilege_set(const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector & group_names) +int32_t ThriftHiveMetastoreConcurrentClient::send_renew_delegation_token(const std::string& token_str_form) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_privilege_set", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("renew_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_privilege_set_pargs args; - args.hiveObject = &hiveObject; - args.user_name = &user_name; - args.group_names = &group_names; + ThriftHiveMetastore_renew_delegation_token_pargs args; + args.token_str_form = &token_str_form; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70690,7 +73339,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_privilege_set(const HiveOb return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivilegeSet& _return, const int32_t seqid) +int64_t ThriftHiveMetastoreConcurrentClient::recv_renew_delegation_token(const int32_t seqid) { int32_t rseqid = 0; @@ -70719,7 +73368,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivil iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_privilege_set") != 0) { + if (fname.compare("renew_delegation_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70728,23 +73377,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivil using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_privilege_set_presult result; + int64_t _return; + ThriftHiveMetastore_renew_delegation_token_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_privilege_set failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "renew_delegation_token failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70754,22 +73403,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivil } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::list_privileges(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) +void ThriftHiveMetastoreConcurrentClient::cancel_delegation_token(const std::string& token_str_form) { - int32_t seqid = send_list_privileges(principal_name, principal_type, hiveObject); - recv_list_privileges(_return, seqid); + int32_t seqid = send_cancel_delegation_token(token_str_form); + recv_cancel_delegation_token(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_list_privileges(const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) +int32_t 
ThriftHiveMetastoreConcurrentClient::send_cancel_delegation_token(const std::string& token_str_form) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("list_privileges", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("cancel_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_list_privileges_pargs args; - args.principal_name = &principal_name; - args.principal_type = &principal_type; - args.hiveObject = &hiveObject; + ThriftHiveMetastore_cancel_delegation_token_pargs args; + args.token_str_form = &token_str_form; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70780,7 +73427,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_list_privileges(const std::str return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_cancel_delegation_token(const int32_t seqid) { int32_t rseqid = 0; @@ -70809,7 +73456,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("list_privileges") != 0) { + if (fname.compare("cancel_delegation_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70818,23 +73465,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "list_privileges failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70844,20 +73485,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("grant_privileges", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_grant_privileges_pargs args; - args.privileges = &privileges; + ThriftHiveMetastore_add_token_pargs args; + args.token_identifier = &token_identifier; + args.delegation_token = &delegation_token; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70868,7 +73510,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_grant_privileges(const Privile return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_add_token(const int32_t seqid) { int32_t rseqid = 0; @@ -70897,7 +73539,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t se iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("grant_privileges") != 0) { + if (fname.compare("add_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70907,7 +73549,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t se throw TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - 
ThriftHiveMetastore_grant_privileges_presult result; + ThriftHiveMetastore_add_token_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -70917,12 +73559,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t se sentry.commit(); return _return; } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_privileges failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_token failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -70932,20 +73570,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t se } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::revoke_privileges(const PrivilegeBag& privileges) +bool ThriftHiveMetastoreConcurrentClient::remove_token(const std::string& token_identifier) { - int32_t seqid = send_revoke_privileges(privileges); - return recv_revoke_privileges(seqid); + int32_t seqid = send_remove_token(token_identifier); + return recv_remove_token(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_privileges(const PrivilegeBag& privileges) +int32_t ThriftHiveMetastoreConcurrentClient::send_remove_token(const std::string& token_identifier) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("revoke_privileges", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("remove_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_revoke_privileges_pargs args; - args.privileges = &privileges; + ThriftHiveMetastore_remove_token_pargs args; + args.token_identifier = &token_identifier; args.write(oprot_); oprot_->writeMessageEnd(); @@ -70956,7 +73594,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_privileges(const Privil return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) { int32_t rseqid = 0; @@ -70985,7 +73623,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("revoke_privileges") != 0) { + if (fname.compare("remove_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -70995,7 +73633,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t s throw TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - ThriftHiveMetastore_revoke_privileges_presult result; + ThriftHiveMetastore_remove_token_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -71005,12 +73643,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t s sentry.commit(); return _return; } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_privileges failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, 
"remove_token failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71020,20 +73654,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t s } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const GrantRevokePrivilegeRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_token(std::string& _return, const std::string& token_identifier) { - int32_t seqid = send_grant_revoke_privileges(request); - recv_grant_revoke_privileges(_return, seqid); + int32_t seqid = send_get_token(token_identifier); + recv_get_token(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_grant_revoke_privileges(const GrantRevokePrivilegeRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_token(const std::string& token_identifier) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("grant_revoke_privileges", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_grant_revoke_privileges_pargs args; - args.request = &request; + ThriftHiveMetastore_get_token_pargs args; + args.token_identifier = &token_identifier; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71044,7 +73678,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_grant_revoke_privileges(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -71073,7 +73707,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevo iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("grant_revoke_privileges") != 0) { + if (fname.compare("get_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71082,7 +73716,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevo using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_grant_revoke_privileges_presult result; + ThriftHiveMetastore_get_token_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -71093,12 +73727,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevo sentry.commit(); return; } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_privileges failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_token failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71108,21 +73738,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevo } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::set_ugi(std::vector & _return, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreConcurrentClient::get_all_token_identifiers(std::vector & 
_return) { - int32_t seqid = send_set_ugi(user_name, group_names); - recv_set_ugi(_return, seqid); + int32_t seqid = send_get_all_token_identifiers(); + recv_get_all_token_identifiers(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_set_ugi(const std::string& user_name, const std::vector & group_names) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_token_identifiers() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("set_ugi", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_token_identifiers", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_set_ugi_pargs args; - args.user_name = &user_name; - args.group_names = &group_names; + ThriftHiveMetastore_get_all_token_identifiers_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71133,7 +73761,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_set_ugi(const std::string& use return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -71162,7 +73790,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("set_ugi") != 0) { + if (fname.compare("get_all_token_identifiers") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71171,7 +73799,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_set_ugi_presult result; + ThriftHiveMetastore_get_all_token_identifiers_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -71182,12 +73810,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector sentry.commit(); return; } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_ugi failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_token_identifiers failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71197,21 +73821,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_delegation_token(std::string& _return, const std::string& token_owner, const std::string& renewer_kerberos_principal_name) +int32_t ThriftHiveMetastoreConcurrentClient::add_master_key(const std::string& key) { - int32_t seqid = send_get_delegation_token(token_owner, renewer_kerberos_principal_name); - recv_get_delegation_token(_return, seqid); + int32_t seqid = send_add_master_key(key); + return recv_add_master_key(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_delegation_token(const std::string& token_owner, const std::string& renewer_kerberos_principal_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_master_key(const std::string& key) { int32_t cseqid = this->sync_.generateSeqId(); 
::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_master_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_delegation_token_pargs args; - args.token_owner = &token_owner; - args.renewer_kerberos_principal_name = &renewer_kerberos_principal_name; + ThriftHiveMetastore_add_master_key_pargs args; + args.key = &key; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71222,7 +73845,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_delegation_token(const std return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& _return, const int32_t seqid) +int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t seqid) { int32_t rseqid = 0; @@ -71251,7 +73874,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_delegation_token") != 0) { + if (fname.compare("add_master_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71260,23 +73883,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_delegation_token_presult result; + int32_t _return; + ThriftHiveMetastore_add_master_key_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_delegation_token failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_master_key failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71286,20 +73909,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& } // end while(true) } -int64_t ThriftHiveMetastoreConcurrentClient::renew_delegation_token(const std::string& token_str_form) +void ThriftHiveMetastoreConcurrentClient::update_master_key(const int32_t seq_number, const std::string& key) { - int32_t seqid = send_renew_delegation_token(token_str_form); - return recv_renew_delegation_token(seqid); + int32_t seqid = send_update_master_key(seq_number, key); + recv_update_master_key(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_renew_delegation_token(const std::string& token_str_form) +int32_t ThriftHiveMetastoreConcurrentClient::send_update_master_key(const int32_t seq_number, const std::string& key) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("renew_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("update_master_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_renew_delegation_token_pargs args; - args.token_str_form = &token_str_form; + ThriftHiveMetastore_update_master_key_pargs args; + 
args.seq_number = &seq_number; + args.key = &key; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71310,7 +73934,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_renew_delegation_token(const s return cseqid; } -int64_t ThriftHiveMetastoreConcurrentClient::recv_renew_delegation_token(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t seqid) { int32_t rseqid = 0; @@ -71339,7 +73963,7 @@ int64_t ThriftHiveMetastoreConcurrentClient::recv_renew_delegation_token(const i iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("renew_delegation_token") != 0) { + if (fname.compare("update_master_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71348,23 +73972,21 @@ int64_t ThriftHiveMetastoreConcurrentClient::recv_renew_delegation_token(const i using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - int64_t _return; - ThriftHiveMetastore_renew_delegation_token_presult result; - result.success = &_return; + ThriftHiveMetastore_update_master_key_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - sentry.commit(); - return _return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "renew_delegation_token failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71374,20 +73996,20 @@ int64_t ThriftHiveMetastoreConcurrentClient::recv_renew_delegation_token(const i } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::cancel_delegation_token(const std::string& token_str_form) +bool ThriftHiveMetastoreConcurrentClient::remove_master_key(const int32_t key_seq) { - int32_t seqid = send_cancel_delegation_token(token_str_form); - recv_cancel_delegation_token(seqid); + int32_t seqid = send_remove_master_key(key_seq); + return recv_remove_master_key(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_cancel_delegation_token(const std::string& token_str_form) +int32_t ThriftHiveMetastoreConcurrentClient::send_remove_master_key(const int32_t key_seq) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("cancel_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("remove_master_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_cancel_delegation_token_pargs args; - args.token_str_form = &token_str_form; + ThriftHiveMetastore_remove_master_key_pargs args; + args.key_seq = &key_seq; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71398,7 +74020,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_cancel_delegation_token(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_cancel_delegation_token(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t seqid) { int32_t rseqid = 0; @@ -71427,7 +74049,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cancel_delegation_token(const int iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if 
(fname.compare("cancel_delegation_token") != 0) { + if (fname.compare("remove_master_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71436,17 +74058,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_cancel_delegation_token(const int using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_cancel_delegation_token_presult result; + bool _return; + ThriftHiveMetastore_remove_master_key_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { sentry.commit(); - throw result.o1; + return _return; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_master_key failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71456,21 +74080,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_cancel_delegation_token(const int } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::add_token(const std::string& token_identifier, const std::string& delegation_token) +void ThriftHiveMetastoreConcurrentClient::get_master_keys(std::vector & _return) { - int32_t seqid = send_add_token(token_identifier, delegation_token); - return recv_add_token(seqid); + int32_t seqid = send_get_master_keys(); + recv_get_master_keys(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_token(const std::string& token_identifier, const std::string& delegation_token) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_master_keys() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_master_keys", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_token_pargs args; - args.token_identifier = &token_identifier; - args.delegation_token = &delegation_token; + ThriftHiveMetastore_get_master_keys_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71481,7 +74103,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_token(const std::string& t return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_add_token(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -71510,7 +74132,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_add_token(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_token") != 0) { + if (fname.compare("get_master_keys") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71519,19 +74141,19 @@ bool ThriftHiveMetastoreConcurrentClient::recv_add_token(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_add_token_presult result; + ThriftHiveMetastore_get_master_keys_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled 
sentry.commit(); - return _return; + return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_token failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_master_keys failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71541,20 +74163,19 @@ bool ThriftHiveMetastoreConcurrentClient::recv_add_token(const int32_t seqid) } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::remove_token(const std::string& token_identifier) +void ThriftHiveMetastoreConcurrentClient::get_open_txns(GetOpenTxnsResponse& _return) { - int32_t seqid = send_remove_token(token_identifier); - return recv_remove_token(seqid); + int32_t seqid = send_get_open_txns(); + recv_get_open_txns(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_remove_token(const std::string& token_identifier) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("remove_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_open_txns", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_remove_token_pargs args; - args.token_identifier = &token_identifier; + ThriftHiveMetastore_get_open_txns_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71565,7 +74186,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_remove_token(const std::string return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -71594,7 +74215,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("remove_token") != 0) { + if (fname.compare("get_open_txns") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71603,19 +74224,19 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_remove_token_presult result; + ThriftHiveMetastore_get_open_txns_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_token failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71625,20 +74246,19 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_token(std::string& _return, const std::string& token_identifier) +void 
ThriftHiveMetastoreConcurrentClient::get_open_txns_info(GetOpenTxnsInfoResponse& _return) { - int32_t seqid = send_get_token(token_identifier); - recv_get_token(_return, seqid); + int32_t seqid = send_get_open_txns_info(); + recv_get_open_txns_info(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_token(const std::string& token_identifier) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns_info() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_open_txns_info", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_token_pargs args; - args.token_identifier = &token_identifier; + ThriftHiveMetastore_get_open_txns_info_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71649,7 +74269,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_token(const std::string& t return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInfoResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -71678,7 +74298,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, c iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_token") != 0) { + if (fname.compare("get_open_txns_info") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71687,7 +74307,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, c using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_token_presult result; + ThriftHiveMetastore_get_open_txns_info_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -71699,7 +74319,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, c return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_token failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns_info failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71709,19 +74329,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, c } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_all_token_identifiers(std::vector & _return) +void ThriftHiveMetastoreConcurrentClient::open_txns(OpenTxnsResponse& _return, const OpenTxnRequest& rqst) { - int32_t seqid = send_get_all_token_identifiers(); - recv_get_all_token_identifiers(_return, seqid); + int32_t seqid = send_open_txns(rqst); + recv_open_txns(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_token_identifiers() +int32_t ThriftHiveMetastoreConcurrentClient::send_open_txns(const OpenTxnRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_all_token_identifiers", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("open_txns", ::apache::thrift::protocol::T_CALL, 
cseqid); - ThriftHiveMetastore_get_all_token_identifiers_pargs args; + ThriftHiveMetastore_open_txns_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71732,7 +74353,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_token_identifiers() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -71761,7 +74382,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::ve iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_token_identifiers") != 0) { + if (fname.compare("open_txns") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71770,7 +74391,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::ve using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_all_token_identifiers_presult result; + ThriftHiveMetastore_open_txns_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -71782,7 +74403,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::ve return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_token_identifiers failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "open_txns failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71792,20 +74413,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::ve } // end while(true) } -int32_t ThriftHiveMetastoreConcurrentClient::add_master_key(const std::string& key) +void ThriftHiveMetastoreConcurrentClient::abort_txn(const AbortTxnRequest& rqst) { - int32_t seqid = send_add_master_key(key); - return recv_add_master_key(seqid); + int32_t seqid = send_abort_txn(rqst); + recv_abort_txn(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_master_key(const std::string& key) +int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txn(const AbortTxnRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("abort_txn", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_master_key_pargs args; - args.key = &key; + ThriftHiveMetastore_abort_txn_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71816,7 +74437,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_master_key(const std::stri return cseqid; } -int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) { int32_t rseqid = 0; @@ -71845,7 +74466,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_master_key") != 0) { + if (fname.compare("abort_txn") != 0) { 
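Every value-returning recv_* above wires the caller's out-parameter into the result struct before deserializing (result.success = &_return), so the reply is decoded in place with no copy, and __isset.success records whether field 0 was actually present on the wire. A minimal sketch of that _presult idiom, with FakeProtocol standing in for the real TProtocol:

#include <stdexcept>
#include <string>

// Stand-in for ::apache::thrift::protocol::TProtocol (assumption).
struct FakeProtocol { bool hasSuccess; std::string value; };

struct Example_presult {
  std::string* success = nullptr;            // points at the caller's _return
  struct { bool success = false; } __isset;  // tracks what was on the wire

  void read(FakeProtocol& in) {
    if (in.hasSuccess) {
      *success = in.value;   // fill the caller's storage in place
      __isset.success = true;
    }
  }
};

void recv_example(std::string& _return, FakeProtocol& in) {
  Example_presult result;
  result.success = &_return;
  result.read(in);
  if (result.__isset.success) return;        // _return has now been filled
  // analogue of TApplicationException::MISSING_RESULT
  throw std::runtime_error("example failed: unknown result");
}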
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71854,23 +74475,17 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t s using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - int32_t _return; - ThriftHiveMetastore_add_master_key_presult result; - result.success = &_return; + ThriftHiveMetastore_abort_txn_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - sentry.commit(); - return _return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_master_key failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -71880,21 +74495,20 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t s } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::update_master_key(const int32_t seq_number, const std::string& key) +void ThriftHiveMetastoreConcurrentClient::abort_txns(const AbortTxnsRequest& rqst) { - int32_t seqid = send_update_master_key(seq_number, key); - recv_update_master_key(seqid); + int32_t seqid = send_abort_txns(rqst); + recv_abort_txns(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_update_master_key(const int32_t seq_number, const std::string& key) +int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txns(const AbortTxnsRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("update_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("abort_txns", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_update_master_key_pargs args; - args.seq_number = &seq_number; - args.key = &key; + ThriftHiveMetastore_abort_txns_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71905,7 +74519,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_update_master_key(const int32_ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) { int32_t rseqid = 0; @@ -71934,7 +74548,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("update_master_key") != 0) { + if (fname.compare("abort_txns") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71943,7 +74557,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t s using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_update_master_key_presult result; + ThriftHiveMetastore_abort_txns_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -71952,10 +74566,6 @@ void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t s sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } sentry.commit(); return; } @@ -71967,20 +74577,20 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t s } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::remove_master_key(const int32_t key_seq) +void ThriftHiveMetastoreConcurrentClient::commit_txn(const CommitTxnRequest& rqst) { - int32_t seqid = send_remove_master_key(key_seq); - return recv_remove_master_key(seqid); + int32_t seqid = send_commit_txn(rqst); + recv_commit_txn(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_remove_master_key(const int32_t key_seq) +int32_t ThriftHiveMetastoreConcurrentClient::send_commit_txn(const CommitTxnRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("remove_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("commit_txn", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_remove_master_key_pargs args; - args.key_seq = &key_seq; + ThriftHiveMetastore_commit_txn_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -71991,7 +74601,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_remove_master_key(const int32_ return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) { int32_t rseqid = 0; @@ -72020,7 +74630,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("remove_master_key") != 0) { + if (fname.compare("commit_txn") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72029,19 +74639,21 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t s using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_remove_master_key_presult result; - result.success = &_return; + ThriftHiveMetastore_commit_txn_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { + if (result.__isset.o1) { sentry.commit(); - return _return; + throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_master_key failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -72051,19 +74663,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t s } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_master_keys(std::vector & _return) +void ThriftHiveMetastoreConcurrentClient::lock(LockResponse& _return, const LockRequest& rqst) { - int32_t seqid = send_get_master_keys(); - recv_get_master_keys(_return, seqid); + int32_t seqid = send_lock(rqst); + recv_lock(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_master_keys() +int32_t ThriftHiveMetastoreConcurrentClient::send_lock(const LockRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_master_keys", ::apache::thrift::protocol::T_CALL, cseqid); + 
oprot_->writeMessageBegin("lock", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_master_keys_pargs args; + ThriftHiveMetastore_lock_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -72074,7 +74687,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_master_keys() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -72103,7 +74716,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_master_keys") != 0) { + if (fname.compare("lock") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72112,7 +74725,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vectorreadMessageEnd(); @@ -72123,8 +74736,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -72134,19 +74755,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_open_txns", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("check_lock", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_open_txns_pargs args; + ThriftHiveMetastore_check_lock_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -72157,7 +74779,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -72186,7 +74808,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_open_txns") != 0) { + if (fname.compare("check_lock") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72195,7 +74817,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_open_txns_presult result; + ThriftHiveMetastore_check_lock_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -72206,8 +74828,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse sentry.commit(); return; } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "check_lock failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ 
-72217,19 +74851,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_open_txns_info(GetOpenTxnsInfoResponse& _return) +void ThriftHiveMetastoreConcurrentClient::unlock(const UnlockRequest& rqst) { - int32_t seqid = send_get_open_txns_info(); - recv_get_open_txns_info(_return, seqid); + int32_t seqid = send_unlock(rqst); + recv_unlock(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns_info() +int32_t ThriftHiveMetastoreConcurrentClient::send_unlock(const UnlockRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_open_txns_info", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("unlock", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_open_txns_info_pargs args; + ThriftHiveMetastore_unlock_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -72240,7 +74875,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns_info() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInfoResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) { int32_t rseqid = 0; @@ -72269,7 +74904,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInf iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_open_txns_info") != 0) { + if (fname.compare("unlock") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72278,19 +74913,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInf using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_open_txns_info_presult result; - result.success = &_return; + ThriftHiveMetastore_unlock_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o1) { sentry.commit(); - return; + throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns_info failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -72300,19 +74937,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInf } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::open_txns(OpenTxnsResponse& _return, const OpenTxnRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::show_locks(ShowLocksResponse& _return, const ShowLocksRequest& rqst) { - int32_t seqid = send_open_txns(rqst); - recv_open_txns(_return, seqid); + int32_t seqid = send_show_locks(rqst); + recv_show_locks(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_open_txns(const OpenTxnRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_show_locks(const ShowLocksRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("open_txns", 
::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("show_locks", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_open_txns_pargs args; + ThriftHiveMetastore_show_locks_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -72324,7 +74961,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_open_txns(const OpenTxnRequest return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -72353,7 +74990,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _retu iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("open_txns") != 0) { + if (fname.compare("show_locks") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72362,7 +74999,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _retu using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_open_txns_presult result; + ThriftHiveMetastore_show_locks_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -72374,7 +75011,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _retu return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "open_txns failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_locks failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -72384,20 +75021,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _retu } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::abort_txn(const AbortTxnRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::heartbeat(const HeartbeatRequest& ids) { - int32_t seqid = send_abort_txn(rqst); - recv_abort_txn(seqid); + int32_t seqid = send_heartbeat(ids); + recv_heartbeat(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txn(const AbortTxnRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat(const HeartbeatRequest& ids) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("abort_txn", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_abort_txn_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_heartbeat_pargs args; + args.ids = &ids; args.write(oprot_); oprot_->writeMessageEnd(); @@ -72408,7 +75045,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txn(const AbortTxnReques return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) { int32_t rseqid = 0; @@ -72437,7 +75074,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("abort_txn") != 0) { + if (fname.compare("heartbeat") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); 
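The rewritten recv_* bodies also show the generator's two dispatch shapes. For a void RPC such as heartbeat, each declared exception is checked in order (o1, then o2, then o3) and thrown if its __isset flag is set, and a reply with no fields at all is simply success; for a value RPC, a reply carrying neither success nor a declared exception is a protocol failure surfaced as TApplicationException::MISSING_RESULT, which is why those branches refuse to commit the sentry. Condensed, with stand-in result types:

#include <stdexcept>

// Stand-ins for generated *_presult structs (assumed shapes).
struct VoidResult {
  struct { bool o1 = false; } __isset;
  std::runtime_error o1{"o1"};
};
struct ValueResult {
  struct { bool success = false; bool o1 = false; } __isset;
  int success = 0;
  std::runtime_error o1{"o1"};
};

void dispatchVoid(const VoidResult& r) {
  if (r.__isset.o1) throw r.o1;  // declared exceptions, checked in order
  // falling through is fine: a void call with nothing set succeeded
}

int dispatchValue(const ValueResult& r) {
  if (r.__isset.success) return r.success;   // normal return value
  if (r.__isset.o1) throw r.o1;              // declared exception
  // neither field present: the analogue of MISSING_RESULT
  throw std::runtime_error("unknown result");
}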
iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72446,7 +75083,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_abort_txn_presult result; + ThriftHiveMetastore_heartbeat_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72455,6 +75092,14 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) sentry.commit(); throw result.o1; } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } sentry.commit(); return; } @@ -72466,20 +75111,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::abort_txns(const AbortTxnsRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const HeartbeatTxnRangeRequest& txns) { - int32_t seqid = send_abort_txns(rqst); - recv_abort_txns(seqid); + int32_t seqid = send_heartbeat_txn_range(txns); + recv_heartbeat_txn_range(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txns(const AbortTxnsRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat_txn_range(const HeartbeatTxnRangeRequest& txns) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("abort_txns", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_abort_txns_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_heartbeat_txn_range_pargs args; + args.txns = &txns; args.write(oprot_); oprot_->writeMessageEnd(); @@ -72490,7 +75135,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txns(const AbortTxnsRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -72519,7 +75164,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("abort_txns") != 0) { + if (fname.compare("heartbeat_txn_range") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72528,17 +75173,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_abort_txns_presult result; + ThriftHiveMetastore_heartbeat_txn_range_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "heartbeat_txn_range failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, 
rseqid); @@ -72548,19 +75195,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::commit_txn(const CommitTxnRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::compact(const CompactionRequest& rqst) { - int32_t seqid = send_commit_txn(rqst); - recv_commit_txn(seqid); + int32_t seqid = send_compact(rqst); + recv_compact(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_commit_txn(const CommitTxnRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_compact(const CompactionRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("commit_txn", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("compact", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_commit_txn_pargs args; + ThriftHiveMetastore_compact_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -72572,7 +75219,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_commit_txn(const CommitTxnRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) { int32_t rseqid = 0; @@ -72601,7 +75248,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("commit_txn") != 0) { + if (fname.compare("compact") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72610,19 +75257,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_commit_txn_presult result; + ThriftHiveMetastore_compact_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } sentry.commit(); return; } @@ -72634,19 +75273,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::lock(LockResponse& _return, const LockRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::compact2(CompactionResponse& _return, const CompactionRequest& rqst) { - int32_t seqid = send_lock(rqst); - recv_lock(_return, seqid); + int32_t seqid = send_compact2(rqst); + recv_compact2(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_lock(const LockRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_compact2(const CompactionRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("lock", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("compact2", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_lock_pargs args; + ThriftHiveMetastore_compact2_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -72658,7 +75297,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_lock(const LockRequest& rqst) return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const int32_t seqid) +void 
ThriftHiveMetastoreConcurrentClient::recv_compact2(CompactionResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -72687,7 +75326,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("lock") != 0) { + if (fname.compare("compact2") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72696,7 +75335,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_lock_presult result; + ThriftHiveMetastore_compact2_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -72707,16 +75346,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const sentry.commit(); return; } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "lock failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "compact2 failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -72726,19 +75357,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::check_lock(LockResponse& _return, const CheckLockRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::show_compact(ShowCompactResponse& _return, const ShowCompactRequest& rqst) { - int32_t seqid = send_check_lock(rqst); - recv_check_lock(_return, seqid); + int32_t seqid = send_show_compact(rqst); + recv_show_compact(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_check_lock(const CheckLockRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_show_compact(const ShowCompactRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("check_lock", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_check_lock_pargs args; + ThriftHiveMetastore_show_compact_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -72750,7 +75381,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_check_lock(const CheckLockRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -72779,7 +75410,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("check_lock") != 0) { + if (fname.compare("show_compact") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72788,7 +75419,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, using ::apache::thrift::protocol::TProtocolException; throw 
TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_check_lock_presult result; + ThriftHiveMetastore_show_compact_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -72799,20 +75430,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, sentry.commit(); return; } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "check_lock failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_compact failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -72822,19 +75441,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::unlock(const UnlockRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::add_dynamic_partitions(const AddDynamicPartitions& rqst) { - int32_t seqid = send_unlock(rqst); - recv_unlock(seqid); + int32_t seqid = send_add_dynamic_partitions(rqst); + recv_add_dynamic_partitions(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_unlock(const UnlockRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_dynamic_partitions(const AddDynamicPartitions& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("unlock", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_unlock_pargs args; + ThriftHiveMetastore_add_dynamic_partitions_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -72846,7 +75465,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_unlock(const UnlockRequest& rq return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int32_t seqid) { int32_t rseqid = 0; @@ -72875,7 +75494,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("unlock") != 0) { + if (fname.compare("add_dynamic_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72884,7 +75503,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_unlock_presult result; + ThriftHiveMetastore_add_dynamic_partitions_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72908,19 +75527,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::show_locks(ShowLocksResponse& _return, const ShowLocksRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::get_next_notification(NotificationEventResponse& _return, const NotificationEventRequest& rqst) { - int32_t seqid = send_show_locks(rqst); - 
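Another recurring detail in these hunks: the *_pargs structs never copy the request. They hold const pointers into the caller's arguments (args.rqst = &rqst) and serialize through them, which is why every send_* takes its request by const reference and the args struct must not outlive the call. A simplified sketch; Sink stands in for the protocol/transport pair:

#include <string>

struct RequestLike { std::string dbname; std::string tablename; };

struct Sink {
  std::string out;
  void write(const std::string& s) { out += s; }
};

// Analogue of a generated *_pargs struct: it borrows the caller's request
// and serializes it in place, copying nothing.
struct Example_pargs {
  const RequestLike* rqst = nullptr;  // borrowed, never owned
  void write(Sink& o) const {
    o.write(rqst->dbname);
    o.write(rqst->tablename);
  }
};

void send_example(Sink& oprot, const RequestLike& rqst) {
  Example_pargs args;
  args.rqst = &rqst;   // same pattern as the generated send_* bodies
  args.write(oprot);
}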
recv_show_locks(_return, seqid); + int32_t seqid = send_get_next_notification(rqst); + recv_get_next_notification(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_show_locks(const ShowLocksRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_next_notification(const NotificationEventRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("show_locks", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_show_locks_pargs args; + ThriftHiveMetastore_get_next_notification_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -72932,7 +75551,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_show_locks(const ShowLocksRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(NotificationEventResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -72961,7 +75580,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("show_locks") != 0) { + if (fname.compare("get_next_notification") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -72970,7 +75589,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_show_locks_presult result; + ThriftHiveMetastore_get_next_notification_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -72982,7 +75601,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_locks failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_next_notification failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -72992,20 +75611,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::heartbeat(const HeartbeatRequest& ids) +void ThriftHiveMetastoreConcurrentClient::get_current_notificationEventId(CurrentNotificationEventId& _return) { - int32_t seqid = send_heartbeat(ids); - recv_heartbeat(seqid); + int32_t seqid = send_get_current_notificationEventId(); + recv_get_current_notificationEventId(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat(const HeartbeatRequest& ids) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_current_notificationEventId() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_heartbeat_pargs args; - args.ids = &ids; + 
ThriftHiveMetastore_get_current_notificationEventId_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73016,7 +75634,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat(const HeartbeatReque return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(CurrentNotificationEventId& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73045,7 +75663,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("heartbeat") != 0) { + if (fname.compare("get_current_notificationEventId") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73054,25 +75672,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_heartbeat_presult result; + ThriftHiveMetastore_get_current_notificationEventId_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o3; + return; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_current_notificationEventId failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73082,20 +75694,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const HeartbeatTxnRangeRequest& txns) +void ThriftHiveMetastoreConcurrentClient::get_notification_events_count(NotificationEventsCountResponse& _return, const NotificationEventsCountRequest& rqst) { - int32_t seqid = send_heartbeat_txn_range(txns); - recv_heartbeat_txn_range(_return, seqid); + int32_t seqid = send_get_notification_events_count(rqst); + recv_get_notification_events_count(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat_txn_range(const HeartbeatTxnRangeRequest& txns) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_notification_events_count(const NotificationEventsCountRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_notification_events_count", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_heartbeat_txn_range_pargs args; - args.txns = &txns; + ThriftHiveMetastore_get_notification_events_count_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73106,7 +75718,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat_txn_range(const Hear return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const int32_t seqid) +void 
ThriftHiveMetastoreConcurrentClient::recv_get_notification_events_count(NotificationEventsCountResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73135,7 +75747,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnR iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("heartbeat_txn_range") != 0) { + if (fname.compare("get_notification_events_count") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73144,7 +75756,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnR using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_heartbeat_txn_range_presult result; + ThriftHiveMetastore_get_notification_events_count_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -73156,85 +75768,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnR return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "heartbeat_txn_range failed: unknown result"); - } - // seqid != rseqid - this->sync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -void ThriftHiveMetastoreConcurrentClient::compact(const CompactionRequest& rqst) -{ - int32_t seqid = send_compact(rqst); - recv_compact(seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_compact(const CompactionRequest& rqst) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("compact", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_compact_pargs args; - args.rqst = &rqst; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("compact") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); - } - ThriftHiveMetastore_compact_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - 
iprot_->getTransport()->readEnd(); - - sentry.commit(); - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_notification_events_count failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73244,19 +75778,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::compact2(CompactionResponse& _return, const CompactionRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::fire_listener_event(FireEventResponse& _return, const FireEventRequest& rqst) { - int32_t seqid = send_compact2(rqst); - recv_compact2(_return, seqid); + int32_t seqid = send_fire_listener_event(rqst); + recv_fire_listener_event(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_compact2(const CompactionRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_fire_listener_event(const FireEventRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("compact2", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_compact2_pargs args; + ThriftHiveMetastore_fire_listener_event_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -73268,7 +75802,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_compact2(const CompactionReque return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_compact2(CompactionResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73297,7 +75831,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_compact2(CompactionResponse& _ret iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("compact2") != 0) { + if (fname.compare("fire_listener_event") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73306,7 +75840,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_compact2(CompactionResponse& _ret using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_compact2_presult result; + ThriftHiveMetastore_fire_listener_event_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -73318,7 +75852,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_compact2(CompactionResponse& _ret return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "compact2 failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fire_listener_event failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73328,20 +75862,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_compact2(CompactionResponse& _ret } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::show_compact(ShowCompactResponse& _return, const ShowCompactRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::flushCache() { - int32_t seqid = send_show_compact(rqst); - recv_show_compact(_return, seqid); + int32_t seqid = send_flushCache(); + recv_flushCache(seqid); } 
-int32_t ThriftHiveMetastoreConcurrentClient::send_show_compact(const ShowCompactRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_flushCache() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_show_compact_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_flushCache_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73352,7 +75885,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_show_compact(const ShowCompact return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) { int32_t rseqid = 0; @@ -73381,7 +75914,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("show_compact") != 0) { + if (fname.compare("flushCache") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73390,19 +75923,13 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_show_compact_presult result; - result.success = &_return; + ThriftHiveMetastore_flushCache_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_compact failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73412,20 +75939,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_dynamic_partitions(const AddDynamicPartitions& rqst) +void ThriftHiveMetastoreConcurrentClient::cm_recycle(CmRecycleResponse& _return, const CmRecycleRequest& request) { - int32_t seqid = send_add_dynamic_partitions(rqst); - recv_add_dynamic_partitions(seqid); + int32_t seqid = send_cm_recycle(request); + recv_cm_recycle(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_dynamic_partitions(const AddDynamicPartitions& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_cm_recycle(const CmRecycleRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("cm_recycle", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_dynamic_partitions_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_cm_recycle_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73436,7 +75963,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_dynamic_partitions(const A return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const 
int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_cm_recycle(CmRecycleResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73465,7 +75992,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int3 iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_dynamic_partitions") != 0) { + if (fname.compare("cm_recycle") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73474,21 +76001,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int3 using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_dynamic_partitions_presult result; + ThriftHiveMetastore_cm_recycle_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o2) { + if (result.__isset.o1) { sentry.commit(); - throw result.o2; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cm_recycle failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73498,20 +76027,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int3 } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_next_notification(NotificationEventResponse& _return, const NotificationEventRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) { - int32_t seqid = send_get_next_notification(rqst); - recv_get_next_notification(_return, seqid); + int32_t seqid = send_get_file_metadata_by_expr(req); + recv_get_file_metadata_by_expr(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_next_notification(const NotificationEventRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata_by_expr(const GetFileMetadataByExprRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_next_notification_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_get_file_metadata_by_expr_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73522,7 +76051,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_next_notification(const No return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(NotificationEventResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73551,7 +76080,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_next_notification") != 0) { + if (fname.compare("get_file_metadata_by_expr") != 0) 
{ iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73560,7 +76089,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_next_notification_presult result; + ThriftHiveMetastore_get_file_metadata_by_expr_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -73572,7 +76101,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_next_notification failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata_by_expr failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73582,19 +76111,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_current_notificationEventId(CurrentNotificationEventId& _return) +void ThriftHiveMetastoreConcurrentClient::get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) { - int32_t seqid = send_get_current_notificationEventId(); - recv_get_current_notificationEventId(_return, seqid); + int32_t seqid = send_get_file_metadata(req); + recv_get_file_metadata(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_current_notificationEventId() +int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata(const GetFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_current_notificationEventId_pargs args; + ThriftHiveMetastore_get_file_metadata_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73605,7 +76135,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_current_notificationEventI return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(CurrentNotificationEventId& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73634,7 +76164,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_current_notificationEventId") != 0) { + if (fname.compare("get_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73643,7 +76173,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_current_notificationEventId_presult result; + ThriftHiveMetastore_get_file_metadata_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ 
-73655,7 +76185,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_current_notificationEventId failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73665,20 +76195,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_notification_events_count(NotificationEventsCountResponse& _return, const NotificationEventsCountRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) { - int32_t seqid = send_get_notification_events_count(rqst); - recv_get_notification_events_count(_return, seqid); + int32_t seqid = send_put_file_metadata(req); + recv_put_file_metadata(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_notification_events_count(const NotificationEventsCountRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_put_file_metadata(const PutFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_notification_events_count", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_notification_events_count_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_put_file_metadata_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73689,7 +76219,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_notification_events_count( return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_notification_events_count(NotificationEventsCountResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73718,7 +76248,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_notification_events_count(Not iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_notification_events_count") != 0) { + if (fname.compare("put_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73727,7 +76257,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_notification_events_count(Not using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_notification_events_count_presult result; + ThriftHiveMetastore_put_file_metadata_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -73739,7 +76269,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_notification_events_count(Not return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_notification_events_count failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "put_file_metadata 
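Each public method on the concurrent client is the same two-line wrapper over a send/recv pair, which is why the renames in these hunks always come in triples. A generic sketch, using the same placeholder names as above:

void Client::example(ExampleResult& _return, const ExampleRequest& req) {
  int32_t seqid = send_example(req);   // serialize the call, remember its sequence id
  recv_example(_return, seqid);        // block until the reply with that id arrives
}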
failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73749,20 +76279,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_notification_events_count(Not } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::fire_listener_event(FireEventResponse& _return, const FireEventRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) { - int32_t seqid = send_fire_listener_event(rqst); - recv_fire_listener_event(_return, seqid); + int32_t seqid = send_clear_file_metadata(req); + recv_clear_file_metadata(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_fire_listener_event(const FireEventRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_clear_file_metadata(const ClearFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_fire_listener_event_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_clear_file_metadata_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73773,7 +76303,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_fire_listener_event(const Fire return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73802,7 +76332,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("fire_listener_event") != 0) { + if (fname.compare("clear_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73811,7 +76341,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_fire_listener_event_presult result; + ThriftHiveMetastore_clear_file_metadata_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -73823,7 +76353,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fire_listener_event failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "clear_file_metadata failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73833,19 +76363,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::flushCache() +void ThriftHiveMetastoreConcurrentClient::cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) { - int32_t seqid = send_flushCache(); - recv_flushCache(seqid); + int32_t seqid = send_cache_file_metadata(req); + recv_cache_file_metadata(_return, 
seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_flushCache() +int32_t ThriftHiveMetastoreConcurrentClient::send_cache_file_metadata(const CacheFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_flushCache_pargs args; + ThriftHiveMetastore_cache_file_metadata_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73856,7 +76387,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_flushCache() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73885,7 +76416,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("flushCache") != 0) { + if (fname.compare("cache_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73894,13 +76425,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_flushCache_presult result; + ThriftHiveMetastore_cache_file_metadata_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - sentry.commit(); - return; + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cache_file_metadata failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73910,20 +76447,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::cm_recycle(CmRecycleResponse& _return, const CmRecycleRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_metastore_db_uuid(std::string& _return) { - int32_t seqid = send_cm_recycle(request); - recv_cm_recycle(_return, seqid); + int32_t seqid = send_get_metastore_db_uuid(); + recv_get_metastore_db_uuid(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_cm_recycle(const CmRecycleRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_metastore_db_uuid() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("cm_recycle", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_metastore_db_uuid", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_cm_recycle_pargs args; - args.request = &request; + ThriftHiveMetastore_get_metastore_db_uuid_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -73934,7 +76470,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_cm_recycle(const CmRecycleRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_cm_recycle(CmRecycleResponse& _return, const int32_t 
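The send side is equally uniform: allocate a fresh sequence id, guard the write with a TConcurrentSendSentry so a failed write does not corrupt the shared transport state, and serialize a pargs struct that merely borrows pointers to the caller's arguments. A sketch under the same placeholder names:

int32_t Client::send_example(const ExampleRequest& req) {
  int32_t cseqid = this->sync_.generateSeqId();
  ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
  oprot_->writeMessageBegin("example", ::apache::thrift::protocol::T_CALL, cseqid);

  Example_pargs args;
  args.req = &req;                  // pargs holds a const pointer; nothing is copied
  args.write(oprot_);

  oprot_->writeMessageEnd();
  oprot_->getTransport()->writeEnd();
  oprot_->getTransport()->flush();

  sentry.commit();                  // mark the send as complete
  return cseqid;                    // recv_example pairs the reply by this id
}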
seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_metastore_db_uuid(std::string& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -73963,7 +76499,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cm_recycle(CmRecycleResponse& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("cm_recycle") != 0) { + if (fname.compare("get_metastore_db_uuid") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -73972,7 +76508,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cm_recycle(CmRecycleResponse& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_cm_recycle_presult result; + ThriftHiveMetastore_get_metastore_db_uuid_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -73988,7 +76524,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cm_recycle(CmRecycleResponse& _re throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cm_recycle failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_metastore_db_uuid failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -73998,20 +76534,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_cm_recycle(CmRecycleResponse& _re } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) +void ThriftHiveMetastoreConcurrentClient::create_resource_plan(const WMResourcePlan& resourcePlan) { - int32_t seqid = send_get_file_metadata_by_expr(req); - recv_get_file_metadata_by_expr(_return, seqid); + int32_t seqid = send_create_resource_plan(resourcePlan); + recv_create_resource_plan(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata_by_expr(const GetFileMetadataByExprRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_resource_plan(const WMResourcePlan& resourcePlan) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_file_metadata_by_expr_pargs args; - args.req = &req; + ThriftHiveMetastore_create_resource_plan_pargs args; + args.resourcePlan = &resourcePlan; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74022,7 +76558,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata_by_expr(cons return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_create_resource_plan(const int32_t seqid) { int32_t rseqid = 0; @@ -74051,7 +76587,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFile iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_file_metadata_by_expr") != 0) { + if (fname.compare("create_resource_plan") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ 
-74060,19 +76596,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFile using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_file_metadata_by_expr_presult result; - result.success = &_return; + ThriftHiveMetastore_create_resource_plan_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o1) { sentry.commit(); - return; + throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata_by_expr failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -74082,20 +76620,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFile } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_resource_plan(WMResourcePlan& _return, const std::string& name) { - int32_t seqid = send_get_file_metadata(req); - recv_get_file_metadata(_return, seqid); + int32_t seqid = send_get_resource_plan(name); + recv_get_resource_plan(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata(const GetFileMetadataRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_resource_plan(const std::string& name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_file_metadata_pargs args; - args.req = &req; + ThriftHiveMetastore_get_resource_plan_pargs args; + args.name = &name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74106,7 +76644,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata(const GetFil return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadataResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_resource_plan(WMResourcePlan& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -74135,7 +76673,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadata iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_file_metadata") != 0) { + if (fname.compare("get_resource_plan") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74144,7 +76682,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadata using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_file_metadata_presult result; + ThriftHiveMetastore_get_resource_plan_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -74155,8 +76693,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadata sentry.commit(); return; } + if (result.__isset.o1) { + sentry.commit(); + 
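create_resource_plan returns void, so its presult has no success pointer at all; the receive tail simply replays whichever declared exception the server set, and an empty presult means the call succeeded (contrast this with the MISSING_RESULT branch of the result-returning calls above). Annotated, the tail looks like this; the comments are an interpretation, not part of the generated output:

ThriftHiveMetastore_create_resource_plan_presult result;  // exception slots only
result.read(iprot_);
iprot_->readMessageEnd();
iprot_->getTransport()->readEnd();
if (result.__isset.o1) {
  sentry.commit();
  throw result.o1;        // InvalidObjectException from the server
}
if (result.__isset.o2) {
  sentry.commit();
  throw result.o2;        // MetaException from the server
}
sentry.commit();
return;                   // no exception set: the void call succeeded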
throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_resource_plan failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -74166,20 +76712,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadata } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_all_resource_plans(std::vector & _return) { - int32_t seqid = send_put_file_metadata(req); - recv_put_file_metadata(_return, seqid); + int32_t seqid = send_get_all_resource_plans(); + recv_get_all_resource_plans(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_put_file_metadata(const PutFileMetadataRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_resource_plans() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_resource_plans", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_put_file_metadata_pargs args; - args.req = &req; + ThriftHiveMetastore_get_all_resource_plans_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74190,7 +76735,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_put_file_metadata(const PutFil return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadataResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_all_resource_plans(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -74219,7 +76764,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("put_file_metadata") != 0) { + if (fname.compare("get_all_resource_plans") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74228,7 +76773,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_put_file_metadata_presult result; + ThriftHiveMetastore_get_all_resource_plans_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -74239,8 +76784,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata sentry.commit(); return; } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "put_file_metadata failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_resource_plans failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -74250,20 +76799,21 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) +void ThriftHiveMetastoreConcurrentClient::alter_resource_plan(const std::string& name, const WMResourcePlan& resourcePlan) { - int32_t seqid = send_clear_file_metadata(req); - recv_clear_file_metadata(_return, seqid); + int32_t seqid = send_alter_resource_plan(name, resourcePlan); + recv_alter_resource_plan(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_clear_file_metadata(const ClearFileMetadataRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_resource_plan(const std::string& name, const WMResourcePlan& resourcePlan) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_clear_file_metadata_pargs args; - args.req = &req; + ThriftHiveMetastore_alter_resource_plan_pargs args; + args.name = &name; + args.resourcePlan = &resourcePlan; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74274,7 +76824,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_clear_file_metadata(const Clea return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMetadataResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_resource_plan(const int32_t seqid) { int32_t rseqid = 0; @@ -74303,7 +76853,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMeta iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("clear_file_metadata") != 0) { + if (fname.compare("alter_resource_plan") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74312,19 +76862,25 @@ void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMeta using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_clear_file_metadata_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_resource_plan_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o1) { sentry.commit(); - return; + throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "clear_file_metadata failed: unknown result"); + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -74334,20 +76890,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMeta } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) +void ThriftHiveMetastoreConcurrentClient::validate_resource_plan(const std::string& name) { - int32_t seqid = send_cache_file_metadata(req); - recv_cache_file_metadata(_return, seqid); + int32_t 
seqid = send_validate_resource_plan(name); + recv_validate_resource_plan(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_cache_file_metadata(const CacheFileMetadataRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_validate_resource_plan(const std::string& name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("validate_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_cache_file_metadata_pargs args; - args.req = &req; + ThriftHiveMetastore_validate_resource_plan_pargs args; + args.name = &name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74358,7 +76914,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_cache_file_metadata(const Cach return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMetadataResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_validate_resource_plan(const int32_t seqid) { int32_t rseqid = 0; @@ -74387,7 +76943,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMeta iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("cache_file_metadata") != 0) { + if (fname.compare("validate_resource_plan") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74396,19 +76952,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMeta using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_cache_file_metadata_presult result; - result.success = &_return; + ThriftHiveMetastore_validate_resource_plan_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o1) { sentry.commit(); - return; + throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cache_file_metadata failed: unknown result"); + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -74418,19 +76976,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMeta } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_metastore_db_uuid(std::string& _return) +void ThriftHiveMetastoreConcurrentClient::drop_resource_plan(const std::string& name) { - int32_t seqid = send_get_metastore_db_uuid(); - recv_get_metastore_db_uuid(_return, seqid); + int32_t seqid = send_drop_resource_plan(name); + recv_drop_resource_plan(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_metastore_db_uuid() +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_resource_plan(const std::string& name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_metastore_db_uuid", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_resource_plan", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_metastore_db_uuid_pargs args; + 
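On the caller's side these void methods surface their Thrift exceptions directly, so application code typically wraps them in try/catch. A hypothetical caller sketch; the client variable is assumed to be an already connected ThriftHiveMetastoreClient, "daytime" is an illustrative plan name, and the exception types with their message field come from hive_metastore.thrift:

try {
  client.validate_resource_plan("daytime");
  client.drop_resource_plan("daytime");
} catch (const Apache::Hadoop::Hive::NoSuchObjectException& e) {
  std::cerr << "no such resource plan: " << e.message << std::endl;
} catch (const Apache::Hadoop::Hive::MetaException& e) {
  std::cerr << "metastore error: " << e.message << std::endl;
}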
ThriftHiveMetastore_drop_resource_plan_pargs args; + args.name = &name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -74441,7 +77000,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_metastore_db_uuid() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_metastore_db_uuid(std::string& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_drop_resource_plan(const int32_t seqid) { int32_t rseqid = 0; @@ -74470,7 +77029,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_metastore_db_uuid(std::string iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_metastore_db_uuid") != 0) { + if (fname.compare("drop_resource_plan") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -74479,23 +77038,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_metastore_db_uuid(std::string using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_metastore_db_uuid_presult result; - result.success = &_return; + ThriftHiveMetastore_drop_resource_plan_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_metastore_db_uuid failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index 744ee59da3..ba0a632c56 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -184,6 +184,12 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) = 0; virtual void cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) = 0; virtual void get_metastore_db_uuid(std::string& _return) = 0; + virtual void create_resource_plan(const WMResourcePlan& resourcePlan) = 0; + virtual void get_resource_plan(WMResourcePlan& _return, const std::string& name) = 0; + virtual void get_all_resource_plans(std::vector & _return) = 0; + virtual void alter_resource_plan(const std::string& name, const WMResourcePlan& resourcePlan) = 0; + virtual void validate_resource_plan(const std::string& name) = 0; + virtual void drop_resource_plan(const std::string& name) = 0; }; class ThriftHiveMetastoreIfFactory : virtual public ::facebook::fb303::FacebookServiceIfFactory { @@ -727,6 +733,24 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void get_metastore_db_uuid(std::string& /* _return */) { return; } + void create_resource_plan(const WMResourcePlan& /* resourcePlan */) { + return; + } + void get_resource_plan(WMResourcePlan& /* _return */, const std::string& /* name */) { + return; + } + void get_all_resource_plans(std::vector & /* _return */) { + return; + } + void alter_resource_plan(const 
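The six virtuals added to ThriftHiveMetastoreIf above are what end-user code programs against. A minimal, self-contained caller sketch; host localhost and port 9083 are assumed metastore defaults, and only the name field of WMResourcePlan is populated here:

#include <iostream>
#include <vector>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TBufferTransports.h>
#include "ThriftHiveMetastore.h"

using namespace Apache::Hadoop::Hive;

int main() {
  boost::shared_ptr< ::apache::thrift::transport::TSocket>
      socket(new ::apache::thrift::transport::TSocket("localhost", 9083));
  boost::shared_ptr< ::apache::thrift::transport::TTransport>
      transport(new ::apache::thrift::transport::TBufferedTransport(socket));
  boost::shared_ptr< ::apache::thrift::protocol::TProtocol>
      protocol(new ::apache::thrift::protocol::TBinaryProtocol(transport));
  ThriftHiveMetastoreClient client(protocol);

  transport->open();

  WMResourcePlan plan;
  plan.__set_name("daytime");            // illustrative plan name
  client.create_resource_plan(plan);

  WMResourcePlan fetched;
  client.get_resource_plan(fetched, "daytime");
  std::cout << "fetched plan: " << fetched.name << std::endl;

  std::vector<WMResourcePlan> all;
  client.get_all_resource_plans(all);
  std::cout << all.size() << " plan(s) defined" << std::endl;

  transport->close();
  return 0;
}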
std::string& /* name */, const WMResourcePlan& /* resourcePlan */) { + return; + } + void validate_resource_plan(const std::string& /* name */) { + return; + } + void drop_resource_plan(const std::string& /* name */) { + return; + } }; typedef struct _ThriftHiveMetastore_getMetaConf_args__isset { @@ -20677,244 +20701,927 @@ class ThriftHiveMetastore_get_metastore_db_uuid_presult { }; -class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public ::facebook::fb303::FacebookServiceClient { +typedef struct _ThriftHiveMetastore_create_resource_plan_args__isset { + _ThriftHiveMetastore_create_resource_plan_args__isset() : resourcePlan(false) {} + bool resourcePlan :1; +} _ThriftHiveMetastore_create_resource_plan_args__isset; + +class ThriftHiveMetastore_create_resource_plan_args { public: - ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) : - ::facebook::fb303::FacebookServiceClient(prot, prot) {} - ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, boost::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) : ::facebook::fb303::FacebookServiceClient(iprot, oprot) {} - boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getInputProtocol() { - return piprot_; + + ThriftHiveMetastore_create_resource_plan_args(const ThriftHiveMetastore_create_resource_plan_args&); + ThriftHiveMetastore_create_resource_plan_args& operator=(const ThriftHiveMetastore_create_resource_plan_args&); + ThriftHiveMetastore_create_resource_plan_args() { } - boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getOutputProtocol() { - return poprot_; + + virtual ~ThriftHiveMetastore_create_resource_plan_args() throw(); + WMResourcePlan resourcePlan; + + _ThriftHiveMetastore_create_resource_plan_args__isset __isset; + + void __set_resourcePlan(const WMResourcePlan& val); + + bool operator == (const ThriftHiveMetastore_create_resource_plan_args & rhs) const + { + if (!(resourcePlan == rhs.resourcePlan)) + return false; + return true; } - void getMetaConf(std::string& _return, const std::string& key); - void send_getMetaConf(const std::string& key); - void recv_getMetaConf(std::string& _return); - void setMetaConf(const std::string& key, const std::string& value); - void send_setMetaConf(const std::string& key, const std::string& value); - void recv_setMetaConf(); - void create_database(const Database& database); - void send_create_database(const Database& database); - void recv_create_database(); - void get_database(Database& _return, const std::string& name); - void send_get_database(const std::string& name); - void recv_get_database(Database& _return); - void drop_database(const std::string& name, const bool deleteData, const bool cascade); - void send_drop_database(const std::string& name, const bool deleteData, const bool cascade); - void recv_drop_database(); - void get_databases(std::vector & _return, const std::string& pattern); - void send_get_databases(const std::string& pattern); - void recv_get_databases(std::vector & _return); - void get_all_databases(std::vector & _return); - void send_get_all_databases(); - void recv_get_all_databases(std::vector & _return); - void alter_database(const std::string& dbname, const Database& db); - void send_alter_database(const std::string& dbname, const Database& db); - void recv_alter_database(); - void get_type(Type& _return, const std::string& name); - void send_get_type(const std::string& name); - void recv_get_type(Type& _return); - bool create_type(const Type& 
type); - void send_create_type(const Type& type); - bool recv_create_type(); - bool drop_type(const std::string& type); - void send_drop_type(const std::string& type); - bool recv_drop_type(); - void get_type_all(std::map & _return, const std::string& name); - void send_get_type_all(const std::string& name); - void recv_get_type_all(std::map & _return); - void get_fields(std::vector & _return, const std::string& db_name, const std::string& table_name); - void send_get_fields(const std::string& db_name, const std::string& table_name); - void recv_get_fields(std::vector & _return); - void get_fields_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); - void send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); - void recv_get_fields_with_environment_context(std::vector & _return); - void get_schema(std::vector & _return, const std::string& db_name, const std::string& table_name); - void send_get_schema(const std::string& db_name, const std::string& table_name); - void recv_get_schema(std::vector & _return); - void get_schema_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); - void send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); - void recv_get_schema_with_environment_context(std::vector & _return); - void create_table(const Table& tbl); - void send_create_table(const Table& tbl); - void recv_create_table(); - void create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context); - void send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context); - void recv_create_table_with_environment_context(); - void create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints); - void send_create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints); - void recv_create_table_with_constraints(); - void drop_constraint(const DropConstraintRequest& req); - void send_drop_constraint(const DropConstraintRequest& req); - void recv_drop_constraint(); - void add_primary_key(const AddPrimaryKeyRequest& req); - void send_add_primary_key(const AddPrimaryKeyRequest& req); - void recv_add_primary_key(); - void add_foreign_key(const AddForeignKeyRequest& req); - void send_add_foreign_key(const AddForeignKeyRequest& req); - void recv_add_foreign_key(); - void add_unique_constraint(const AddUniqueConstraintRequest& req); - void send_add_unique_constraint(const AddUniqueConstraintRequest& req); - void recv_add_unique_constraint(); - void add_not_null_constraint(const AddNotNullConstraintRequest& req); - void send_add_not_null_constraint(const AddNotNullConstraintRequest& req); - void recv_add_not_null_constraint(); - void drop_table(const std::string& dbname, const std::string& name, const bool deleteData); - void send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData); - void recv_drop_table(); - void drop_table_with_environment_context(const 
std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context); - void send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context); - void recv_drop_table_with_environment_context(); - void truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames); - void send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames); - void recv_truncate_table(); - void get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern); - void send_get_tables(const std::string& db_name, const std::string& pattern); - void recv_get_tables(std::vector & _return); - void get_tables_by_type(std::vector & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType); - void send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType); - void recv_get_tables_by_type(std::vector & _return); - void get_table_meta(std::vector & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types); - void send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types); - void recv_get_table_meta(std::vector & _return); - void get_all_tables(std::vector & _return, const std::string& db_name); - void send_get_all_tables(const std::string& db_name); - void recv_get_all_tables(std::vector & _return); - void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name); - void send_get_table(const std::string& dbname, const std::string& tbl_name); - void recv_get_table(Table& _return); - void get_table_objects_by_name(std::vector
& _return, const std::string& dbname, const std::vector & tbl_names); - void send_get_table_objects_by_name(const std::string& dbname, const std::vector & tbl_names); - void recv_get_table_objects_by_name(std::vector
& _return); - void get_table_req(GetTableResult& _return, const GetTableRequest& req); - void send_get_table_req(const GetTableRequest& req); - void recv_get_table_req(GetTableResult& _return); - void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req); - void send_get_table_objects_by_name_req(const GetTablesRequest& req); - void recv_get_table_objects_by_name_req(GetTablesResult& _return); - void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables); - void send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables); - void recv_get_table_names_by_filter(std::vector & _return); - void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl); - void send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl); - void recv_alter_table(); - void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context); - void send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context); - void recv_alter_table_with_environment_context(); - void alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade); - void send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade); - void recv_alter_table_with_cascade(); - void add_partition(Partition& _return, const Partition& new_part); - void send_add_partition(const Partition& new_part); - void recv_add_partition(Partition& _return); - void add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context); - void send_add_partition_with_environment_context(const Partition& new_part, const EnvironmentContext& environment_context); - void recv_add_partition_with_environment_context(Partition& _return); - int32_t add_partitions(const std::vector & new_parts); - void send_add_partitions(const std::vector & new_parts); - int32_t recv_add_partitions(); - int32_t add_partitions_pspec(const std::vector & new_parts); - void send_add_partitions_pspec(const std::vector & new_parts); - int32_t recv_add_partitions_pspec(); - void append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); - void send_append_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); - void recv_append_partition(Partition& _return); - void add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request); - void send_add_partitions_req(const AddPartitionsRequest& request); - void recv_add_partitions_req(AddPartitionsResult& _return); - void append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context); - void send_append_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context); - void recv_append_partition_with_environment_context(Partition& _return); - void append_partition_by_name(Partition& _return, 
const std::string& db_name, const std::string& tbl_name, const std::string& part_name); - void send_append_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name); - void recv_append_partition_by_name(Partition& _return); - void append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context); - void send_append_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context); - void recv_append_partition_by_name_with_environment_context(Partition& _return); - bool drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData); - void send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData); - bool recv_drop_partition(); - bool drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context); - void send_drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context); - bool recv_drop_partition_with_environment_context(); - bool drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData); - void send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData); - bool recv_drop_partition_by_name(); - bool drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context); - void send_drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context); - bool recv_drop_partition_by_name_with_environment_context(); - void drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req); - void send_drop_partitions_req(const DropPartitionsRequest& req); - void recv_drop_partitions_req(DropPartitionsResult& _return); - void get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); - void send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); - void recv_get_partition(Partition& _return); - void exchange_partition(Partition& _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); - void send_exchange_partition(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); - void recv_exchange_partition(Partition& _return); - void exchange_partitions(std::vector & _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); - 
void send_exchange_partitions(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); - void recv_exchange_partitions(std::vector & _return); - void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names); - void send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names); - void recv_get_partition_with_auth(Partition& _return); - void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name); - void send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name); - void recv_get_partition_by_name(Partition& _return); - void get_partitions(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); - void send_get_partitions(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); - void recv_get_partitions(std::vector & _return); - void get_partitions_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); - void send_get_partitions_with_auth(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); - void recv_get_partitions_with_auth(std::vector & _return); - void get_partitions_pspec(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts); - void send_get_partitions_pspec(const std::string& db_name, const std::string& tbl_name, const int32_t max_parts); - void recv_get_partitions_pspec(std::vector & _return); - void get_partition_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); - void send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); - void recv_get_partition_names(std::vector & _return); - void get_partition_values(PartitionValuesResponse& _return, const PartitionValuesRequest& request); - void send_get_partition_values(const PartitionValuesRequest& request); - void recv_get_partition_values(PartitionValuesResponse& _return); - void get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); - void send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); - void recv_get_partitions_ps(std::vector & _return); - void get_partitions_ps_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); - void send_get_partitions_ps_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); - void recv_get_partitions_ps_with_auth(std::vector & _return); - void get_partition_names_ps(std::vector & _return, const std::string& 
db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); - void send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); - void recv_get_partition_names_ps(std::vector & _return); - void get_partitions_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts); - void send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts); - void recv_get_partitions_by_filter(std::vector & _return); - void get_part_specs_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts); - void send_get_part_specs_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts); - void recv_get_part_specs_by_filter(std::vector & _return); - void get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req); - void send_get_partitions_by_expr(const PartitionsByExprRequest& req); - void recv_get_partitions_by_expr(PartitionsByExprResult& _return); - int32_t get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter); - void send_get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter); - int32_t recv_get_num_partitions_by_filter(); - void get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names); - void send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector & names); - void recv_get_partitions_by_names(std::vector & _return); - void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part); - void send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part); - void recv_alter_partition(); - void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts); - void send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts); - void recv_alter_partitions(); - void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context); - void send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context); - void recv_alter_partitions_with_environment_context(); - void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); - void send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); - void recv_alter_partition_with_environment_context(); - void rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part); - void send_rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& 
new_part);
+  bool operator != (const ThriftHiveMetastore_create_resource_plan_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_create_resource_plan_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_create_resource_plan_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_create_resource_plan_pargs() throw();
+  const WMResourcePlan* resourcePlan;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_create_resource_plan_result__isset {
+  _ThriftHiveMetastore_create_resource_plan_result__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_create_resource_plan_result__isset;
+
+class ThriftHiveMetastore_create_resource_plan_result {
+ public:
+
+  ThriftHiveMetastore_create_resource_plan_result(const ThriftHiveMetastore_create_resource_plan_result&);
+  ThriftHiveMetastore_create_resource_plan_result& operator=(const ThriftHiveMetastore_create_resource_plan_result&);
+  ThriftHiveMetastore_create_resource_plan_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_create_resource_plan_result() throw();
+  InvalidObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_create_resource_plan_result__isset __isset;
+
+  void __set_o1(const InvalidObjectException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_create_resource_plan_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_create_resource_plan_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_create_resource_plan_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_create_resource_plan_presult__isset {
+  _ThriftHiveMetastore_create_resource_plan_presult__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_create_resource_plan_presult__isset;
+
+class ThriftHiveMetastore_create_resource_plan_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_create_resource_plan_presult() throw();
+  InvalidObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_create_resource_plan_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_get_resource_plan_args__isset {
+  _ThriftHiveMetastore_get_resource_plan_args__isset() : name(false) {}
+  bool name :1;
+} _ThriftHiveMetastore_get_resource_plan_args__isset;
+
+class ThriftHiveMetastore_get_resource_plan_args {
+ public:
+
+  ThriftHiveMetastore_get_resource_plan_args(const ThriftHiveMetastore_get_resource_plan_args&);
+  ThriftHiveMetastore_get_resource_plan_args& operator=(const ThriftHiveMetastore_get_resource_plan_args&);
+  ThriftHiveMetastore_get_resource_plan_args() : name() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_resource_plan_args() throw();
+  std::string name;
+
+  _ThriftHiveMetastore_get_resource_plan_args__isset __isset;
+
+  void __set_name(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_get_resource_plan_args & rhs) const
+  {
+    if (!(name == rhs.name))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_resource_plan_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_resource_plan_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_resource_plan_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_resource_plan_pargs() throw();
+  const std::string* name;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_resource_plan_result__isset {
+  _ThriftHiveMetastore_get_resource_plan_result__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_resource_plan_result__isset;
+
+class ThriftHiveMetastore_get_resource_plan_result {
+ public:
+
+  ThriftHiveMetastore_get_resource_plan_result(const ThriftHiveMetastore_get_resource_plan_result&);
+  ThriftHiveMetastore_get_resource_plan_result& operator=(const ThriftHiveMetastore_get_resource_plan_result&);
+  ThriftHiveMetastore_get_resource_plan_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_resource_plan_result() throw();
+  WMResourcePlan success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_resource_plan_result__isset __isset;
+
+  void __set_success(const WMResourcePlan& val);
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_get_resource_plan_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_resource_plan_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_resource_plan_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_resource_plan_presult__isset {
+  _ThriftHiveMetastore_get_resource_plan_presult__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_resource_plan_presult__isset;
+
+class ThriftHiveMetastore_get_resource_plan_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_resource_plan_presult() throw();
+  WMResourcePlan* success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_resource_plan_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class ThriftHiveMetastore_get_all_resource_plans_args {
+ public:
+
+  ThriftHiveMetastore_get_all_resource_plans_args(const ThriftHiveMetastore_get_all_resource_plans_args&);
+  ThriftHiveMetastore_get_all_resource_plans_args& operator=(const ThriftHiveMetastore_get_all_resource_plans_args&);
+  ThriftHiveMetastore_get_all_resource_plans_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_all_resource_plans_args() throw();
+
+  bool operator == (const ThriftHiveMetastore_get_all_resource_plans_args & /* rhs */) const
+  {
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_all_resource_plans_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_all_resource_plans_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_all_resource_plans_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_all_resource_plans_pargs() throw();
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_all_resource_plans_result__isset {
+  _ThriftHiveMetastore_get_all_resource_plans_result__isset() : success(false), o1(false) {}
+  bool success :1;
+  bool o1 :1;
+} _ThriftHiveMetastore_get_all_resource_plans_result__isset;
+
+class ThriftHiveMetastore_get_all_resource_plans_result {
+ public:
+
+  ThriftHiveMetastore_get_all_resource_plans_result(const ThriftHiveMetastore_get_all_resource_plans_result&);
+  ThriftHiveMetastore_get_all_resource_plans_result& operator=(const ThriftHiveMetastore_get_all_resource_plans_result&);
+  ThriftHiveMetastore_get_all_resource_plans_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_all_resource_plans_result() throw();
+  std::vector<WMResourcePlan> success;
+  MetaException o1;
+
+  _ThriftHiveMetastore_get_all_resource_plans_result__isset __isset;
+
+  void __set_success(const std::vector<WMResourcePlan> & val);
+
+  void __set_o1(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_get_all_resource_plans_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(o1 == rhs.o1))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_all_resource_plans_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_all_resource_plans_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_all_resource_plans_presult__isset {
+  _ThriftHiveMetastore_get_all_resource_plans_presult__isset() : success(false), o1(false) {}
+  bool success :1;
+  bool o1 :1;
+} _ThriftHiveMetastore_get_all_resource_plans_presult__isset;
+
+class ThriftHiveMetastore_get_all_resource_plans_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_all_resource_plans_presult() throw();
+  std::vector<WMResourcePlan> * success;
+  MetaException o1;
+
+  _ThriftHiveMetastore_get_all_resource_plans_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_resource_plan_args__isset {
+  _ThriftHiveMetastore_alter_resource_plan_args__isset() : name(false), resourcePlan(false) {}
+  bool name :1;
+  bool resourcePlan :1;
+} _ThriftHiveMetastore_alter_resource_plan_args__isset;
+
+class ThriftHiveMetastore_alter_resource_plan_args {
+ public:
+
+  ThriftHiveMetastore_alter_resource_plan_args(const ThriftHiveMetastore_alter_resource_plan_args&);
+  ThriftHiveMetastore_alter_resource_plan_args& operator=(const ThriftHiveMetastore_alter_resource_plan_args&);
+  ThriftHiveMetastore_alter_resource_plan_args() : name() {
+  }
+
+  virtual ~ThriftHiveMetastore_alter_resource_plan_args() throw();
+  std::string name;
+  WMResourcePlan resourcePlan;
+
+  _ThriftHiveMetastore_alter_resource_plan_args__isset __isset;
+
+  void __set_name(const std::string& val);
+
+  void __set_resourcePlan(const WMResourcePlan& val);
+
+  bool operator == (const ThriftHiveMetastore_alter_resource_plan_args & rhs) const
+  {
+    if (!(name == rhs.name))
+      return false;
+    if (!(resourcePlan == rhs.resourcePlan))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_alter_resource_plan_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_alter_resource_plan_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_alter_resource_plan_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_alter_resource_plan_pargs() throw();
+  const std::string* name;
+  const WMResourcePlan* resourcePlan;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_resource_plan_result__isset {
+  _ThriftHiveMetastore_alter_resource_plan_result__isset() : o1(false), o3(false), o2(false) {}
+  bool o1 :1;
+  bool o3 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_alter_resource_plan_result__isset;
+
+class ThriftHiveMetastore_alter_resource_plan_result {
+ public:
+
+  ThriftHiveMetastore_alter_resource_plan_result(const ThriftHiveMetastore_alter_resource_plan_result&);
+  ThriftHiveMetastore_alter_resource_plan_result& operator=(const ThriftHiveMetastore_alter_resource_plan_result&);
+  ThriftHiveMetastore_alter_resource_plan_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_alter_resource_plan_result() throw();
+  NoSuchObjectException o1;
+  InvalidOperationException o3;
+  MetaException o2;
+
+  _ThriftHiveMetastore_alter_resource_plan_result__isset __isset;
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o3(const InvalidOperationException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_alter_resource_plan_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o3 == rhs.o3))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_alter_resource_plan_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_alter_resource_plan_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_resource_plan_presult__isset {
+  _ThriftHiveMetastore_alter_resource_plan_presult__isset() : o1(false), o3(false), o2(false) {}
+  bool o1 :1;
+  bool o3 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_alter_resource_plan_presult__isset;
+
+class ThriftHiveMetastore_alter_resource_plan_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_alter_resource_plan_presult() throw();
+  NoSuchObjectException o1;
+  InvalidOperationException o3;
+  MetaException o2;
+
+  _ThriftHiveMetastore_alter_resource_plan_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_validate_resource_plan_args__isset {
+  _ThriftHiveMetastore_validate_resource_plan_args__isset() : name(false) {}
+  bool name :1;
+} _ThriftHiveMetastore_validate_resource_plan_args__isset;
+
+class ThriftHiveMetastore_validate_resource_plan_args {
+ public:
+
+  ThriftHiveMetastore_validate_resource_plan_args(const ThriftHiveMetastore_validate_resource_plan_args&);
+  ThriftHiveMetastore_validate_resource_plan_args& operator=(const ThriftHiveMetastore_validate_resource_plan_args&);
+  ThriftHiveMetastore_validate_resource_plan_args() : name() {
+  }
+
+  virtual ~ThriftHiveMetastore_validate_resource_plan_args() throw();
+  std::string name;
+
+  _ThriftHiveMetastore_validate_resource_plan_args__isset __isset;
+
+  void __set_name(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_validate_resource_plan_args & rhs) const
+  {
+    if (!(name == rhs.name))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_validate_resource_plan_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_validate_resource_plan_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_validate_resource_plan_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_validate_resource_plan_pargs() throw();
+  const std::string* name;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_validate_resource_plan_result__isset {
+  _ThriftHiveMetastore_validate_resource_plan_result__isset() : o1(false), o3(false) {}
+  bool o1 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_validate_resource_plan_result__isset;
+
+class ThriftHiveMetastore_validate_resource_plan_result {
+ public:
+
+  ThriftHiveMetastore_validate_resource_plan_result(const ThriftHiveMetastore_validate_resource_plan_result&);
+  ThriftHiveMetastore_validate_resource_plan_result& operator=(const ThriftHiveMetastore_validate_resource_plan_result&);
+  ThriftHiveMetastore_validate_resource_plan_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_validate_resource_plan_result() throw();
+  NoSuchObjectException o1;
+  MetaException o3;
+
+  _ThriftHiveMetastore_validate_resource_plan_result__isset __isset;
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o3(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_validate_resource_plan_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o3 == rhs.o3))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_validate_resource_plan_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_validate_resource_plan_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_validate_resource_plan_presult__isset {
+  _ThriftHiveMetastore_validate_resource_plan_presult__isset() : o1(false), o3(false) {}
+  bool o1 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_validate_resource_plan_presult__isset;
+
+class ThriftHiveMetastore_validate_resource_plan_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_validate_resource_plan_presult() throw();
+  NoSuchObjectException o1;
+  MetaException o3;
+
+  _ThriftHiveMetastore_validate_resource_plan_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_resource_plan_args__isset {
+  _ThriftHiveMetastore_drop_resource_plan_args__isset() : name(false) {}
+  bool name :1;
+} _ThriftHiveMetastore_drop_resource_plan_args__isset;
+
+class ThriftHiveMetastore_drop_resource_plan_args {
+ public:
+
+  ThriftHiveMetastore_drop_resource_plan_args(const ThriftHiveMetastore_drop_resource_plan_args&);
+  ThriftHiveMetastore_drop_resource_plan_args& operator=(const ThriftHiveMetastore_drop_resource_plan_args&);
+  ThriftHiveMetastore_drop_resource_plan_args() : name() {
+  }
+
+  virtual ~ThriftHiveMetastore_drop_resource_plan_args() throw();
+  std::string name;
+
+  _ThriftHiveMetastore_drop_resource_plan_args__isset __isset;
+
+  void __set_name(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_drop_resource_plan_args & rhs) const
+  {
+    if (!(name == rhs.name))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_drop_resource_plan_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_drop_resource_plan_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_drop_resource_plan_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_drop_resource_plan_pargs() throw();
+  const std::string* name;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_resource_plan_result__isset {
+  _ThriftHiveMetastore_drop_resource_plan_result__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_drop_resource_plan_result__isset;
+
+class ThriftHiveMetastore_drop_resource_plan_result {
+ public:
+
+  ThriftHiveMetastore_drop_resource_plan_result(const ThriftHiveMetastore_drop_resource_plan_result&);
+  ThriftHiveMetastore_drop_resource_plan_result& operator=(const ThriftHiveMetastore_drop_resource_plan_result&);
+  ThriftHiveMetastore_drop_resource_plan_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_drop_resource_plan_result() throw();
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_drop_resource_plan_result__isset __isset;
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_drop_resource_plan_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_drop_resource_plan_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_drop_resource_plan_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_resource_plan_presult__isset {
+  _ThriftHiveMetastore_drop_resource_plan_presult__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_drop_resource_plan_presult__isset;
+
+class ThriftHiveMetastore_drop_resource_plan_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_drop_resource_plan_presult() throw();
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_drop_resource_plan_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public ::facebook::fb303::FacebookServiceClient {
+ public:
+  ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) :
+    ::facebook::fb303::FacebookServiceClient(prot, prot) {}
+  ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, boost::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) : ::facebook::fb303::FacebookServiceClient(iprot, oprot) {}
+  boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getInputProtocol() {
+    return piprot_;
+  }
+  boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getOutputProtocol() {
+    return poprot_;
+  }
+  void getMetaConf(std::string& _return, const std::string& key);
+  void send_getMetaConf(const std::string& key);
+  void 
recv_getMetaConf(std::string& _return); + void setMetaConf(const std::string& key, const std::string& value); + void send_setMetaConf(const std::string& key, const std::string& value); + void recv_setMetaConf(); + void create_database(const Database& database); + void send_create_database(const Database& database); + void recv_create_database(); + void get_database(Database& _return, const std::string& name); + void send_get_database(const std::string& name); + void recv_get_database(Database& _return); + void drop_database(const std::string& name, const bool deleteData, const bool cascade); + void send_drop_database(const std::string& name, const bool deleteData, const bool cascade); + void recv_drop_database(); + void get_databases(std::vector & _return, const std::string& pattern); + void send_get_databases(const std::string& pattern); + void recv_get_databases(std::vector & _return); + void get_all_databases(std::vector & _return); + void send_get_all_databases(); + void recv_get_all_databases(std::vector & _return); + void alter_database(const std::string& dbname, const Database& db); + void send_alter_database(const std::string& dbname, const Database& db); + void recv_alter_database(); + void get_type(Type& _return, const std::string& name); + void send_get_type(const std::string& name); + void recv_get_type(Type& _return); + bool create_type(const Type& type); + void send_create_type(const Type& type); + bool recv_create_type(); + bool drop_type(const std::string& type); + void send_drop_type(const std::string& type); + bool recv_drop_type(); + void get_type_all(std::map & _return, const std::string& name); + void send_get_type_all(const std::string& name); + void recv_get_type_all(std::map & _return); + void get_fields(std::vector & _return, const std::string& db_name, const std::string& table_name); + void send_get_fields(const std::string& db_name, const std::string& table_name); + void recv_get_fields(std::vector & _return); + void get_fields_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); + void send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); + void recv_get_fields_with_environment_context(std::vector & _return); + void get_schema(std::vector & _return, const std::string& db_name, const std::string& table_name); + void send_get_schema(const std::string& db_name, const std::string& table_name); + void recv_get_schema(std::vector & _return); + void get_schema_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); + void send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context); + void recv_get_schema_with_environment_context(std::vector & _return); + void create_table(const Table& tbl); + void send_create_table(const Table& tbl); + void recv_create_table(); + void create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context); + void send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context); + void recv_create_table_with_environment_context(); + void create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, 
const std::vector & notNullConstraints); + void send_create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints); + void recv_create_table_with_constraints(); + void drop_constraint(const DropConstraintRequest& req); + void send_drop_constraint(const DropConstraintRequest& req); + void recv_drop_constraint(); + void add_primary_key(const AddPrimaryKeyRequest& req); + void send_add_primary_key(const AddPrimaryKeyRequest& req); + void recv_add_primary_key(); + void add_foreign_key(const AddForeignKeyRequest& req); + void send_add_foreign_key(const AddForeignKeyRequest& req); + void recv_add_foreign_key(); + void add_unique_constraint(const AddUniqueConstraintRequest& req); + void send_add_unique_constraint(const AddUniqueConstraintRequest& req); + void recv_add_unique_constraint(); + void add_not_null_constraint(const AddNotNullConstraintRequest& req); + void send_add_not_null_constraint(const AddNotNullConstraintRequest& req); + void recv_add_not_null_constraint(); + void drop_table(const std::string& dbname, const std::string& name, const bool deleteData); + void send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData); + void recv_drop_table(); + void drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context); + void send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context); + void recv_drop_table_with_environment_context(); + void truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames); + void send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector & partNames); + void recv_truncate_table(); + void get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern); + void send_get_tables(const std::string& db_name, const std::string& pattern); + void recv_get_tables(std::vector & _return); + void get_tables_by_type(std::vector & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType); + void send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType); + void recv_get_tables_by_type(std::vector & _return); + void get_table_meta(std::vector & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types); + void send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types); + void recv_get_table_meta(std::vector & _return); + void get_all_tables(std::vector & _return, const std::string& db_name); + void send_get_all_tables(const std::string& db_name); + void recv_get_all_tables(std::vector & _return); + void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name); + void send_get_table(const std::string& dbname, const std::string& tbl_name); + void recv_get_table(Table& _return); + void get_table_objects_by_name(std::vector
& _return, const std::string& dbname, const std::vector & tbl_names); + void send_get_table_objects_by_name(const std::string& dbname, const std::vector & tbl_names); + void recv_get_table_objects_by_name(std::vector
& _return); + void get_table_req(GetTableResult& _return, const GetTableRequest& req); + void send_get_table_req(const GetTableRequest& req); + void recv_get_table_req(GetTableResult& _return); + void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req); + void send_get_table_objects_by_name_req(const GetTablesRequest& req); + void recv_get_table_objects_by_name_req(GetTablesResult& _return); + void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables); + void send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables); + void recv_get_table_names_by_filter(std::vector & _return); + void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl); + void send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl); + void recv_alter_table(); + void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context); + void send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context); + void recv_alter_table_with_environment_context(); + void alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade); + void send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade); + void recv_alter_table_with_cascade(); + void add_partition(Partition& _return, const Partition& new_part); + void send_add_partition(const Partition& new_part); + void recv_add_partition(Partition& _return); + void add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context); + void send_add_partition_with_environment_context(const Partition& new_part, const EnvironmentContext& environment_context); + void recv_add_partition_with_environment_context(Partition& _return); + int32_t add_partitions(const std::vector & new_parts); + void send_add_partitions(const std::vector & new_parts); + int32_t recv_add_partitions(); + int32_t add_partitions_pspec(const std::vector & new_parts); + void send_add_partitions_pspec(const std::vector & new_parts); + int32_t recv_add_partitions_pspec(); + void append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); + void send_append_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); + void recv_append_partition(Partition& _return); + void add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request); + void send_add_partitions_req(const AddPartitionsRequest& request); + void recv_add_partitions_req(AddPartitionsResult& _return); + void append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context); + void send_append_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context); + void recv_append_partition_with_environment_context(Partition& _return); + void append_partition_by_name(Partition& _return, 
const std::string& db_name, const std::string& tbl_name, const std::string& part_name); + void send_append_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name); + void recv_append_partition_by_name(Partition& _return); + void append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context); + void send_append_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context); + void recv_append_partition_by_name_with_environment_context(Partition& _return); + bool drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData); + void send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData); + bool recv_drop_partition(); + bool drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context); + void send_drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context); + bool recv_drop_partition_with_environment_context(); + bool drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData); + void send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData); + bool recv_drop_partition_by_name(); + bool drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context); + void send_drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context); + bool recv_drop_partition_by_name_with_environment_context(); + void drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req); + void send_drop_partitions_req(const DropPartitionsRequest& req); + void recv_drop_partitions_req(DropPartitionsResult& _return); + void get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); + void send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals); + void recv_get_partition(Partition& _return); + void exchange_partition(Partition& _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); + void send_exchange_partition(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); + void recv_exchange_partition(Partition& _return); + void exchange_partitions(std::vector & _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); + 
void send_exchange_partitions(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name); + void recv_exchange_partitions(std::vector & _return); + void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names); + void send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names); + void recv_get_partition_with_auth(Partition& _return); + void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name); + void send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name); + void recv_get_partition_by_name(Partition& _return); + void get_partitions(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); + void send_get_partitions(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); + void recv_get_partitions(std::vector & _return); + void get_partitions_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); + void send_get_partitions_with_auth(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); + void recv_get_partitions_with_auth(std::vector & _return); + void get_partitions_pspec(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts); + void send_get_partitions_pspec(const std::string& db_name, const std::string& tbl_name, const int32_t max_parts); + void recv_get_partitions_pspec(std::vector & _return); + void get_partition_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); + void send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts); + void recv_get_partition_names(std::vector & _return); + void get_partition_values(PartitionValuesResponse& _return, const PartitionValuesRequest& request); + void send_get_partition_values(const PartitionValuesRequest& request); + void recv_get_partition_values(PartitionValuesResponse& _return); + void get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); + void send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); + void recv_get_partitions_ps(std::vector & _return); + void get_partitions_ps_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); + void send_get_partitions_ps_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names); + void recv_get_partitions_ps_with_auth(std::vector & _return); + void get_partition_names_ps(std::vector & _return, const std::string& 
db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); + void send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts); + void recv_get_partition_names_ps(std::vector & _return); + void get_partitions_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts); + void send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts); + void recv_get_partitions_by_filter(std::vector & _return); + void get_part_specs_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts); + void send_get_part_specs_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts); + void recv_get_part_specs_by_filter(std::vector & _return); + void get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req); + void send_get_partitions_by_expr(const PartitionsByExprRequest& req); + void recv_get_partitions_by_expr(PartitionsByExprResult& _return); + int32_t get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter); + void send_get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter); + int32_t recv_get_num_partitions_by_filter(); + void get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names); + void send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector & names); + void recv_get_partitions_by_names(std::vector & _return); + void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part); + void send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part); + void recv_alter_partition(); + void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts); + void send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts); + void recv_alter_partitions(); + void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context); + void send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context); + void recv_alter_partitions_with_environment_context(); + void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); + void send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); + void recv_alter_partition_with_environment_context(); + void rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part); + void send_rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& 
new_part);
 void recv_rename_partition();
 bool partition_name_has_valid_characters(const std::vector<std::string> & part_vals, const bool throw_exception);
 void send_partition_name_has_valid_characters(const std::vector<std::string> & part_vals, const bool throw_exception);
@@ -21174,6 +21881,24 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
   void get_metastore_db_uuid(std::string& _return);
   void send_get_metastore_db_uuid();
   void recv_get_metastore_db_uuid(std::string& _return);
+  void create_resource_plan(const WMResourcePlan& resourcePlan);
+  void send_create_resource_plan(const WMResourcePlan& resourcePlan);
+  void recv_create_resource_plan();
+  void get_resource_plan(WMResourcePlan& _return, const std::string& name);
+  void send_get_resource_plan(const std::string& name);
+  void recv_get_resource_plan(WMResourcePlan& _return);
+  void get_all_resource_plans(std::vector<WMResourcePlan> & _return);
+  void send_get_all_resource_plans();
+  void recv_get_all_resource_plans(std::vector<WMResourcePlan> & _return);
+  void alter_resource_plan(const std::string& name, const WMResourcePlan& resourcePlan);
+  void send_alter_resource_plan(const std::string& name, const WMResourcePlan& resourcePlan);
+  void recv_alter_resource_plan();
+  void validate_resource_plan(const std::string& name);
+  void send_validate_resource_plan(const std::string& name);
+  void recv_validate_resource_plan();
+  void drop_resource_plan(const std::string& name);
+  void send_drop_resource_plan(const std::string& name);
+  void recv_drop_resource_plan();
 };
 
 class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceProcessor {
@@ -21346,6 +22071,12 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
   void process_clear_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_cache_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_metastore_db_uuid(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_create_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_get_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_get_all_resource_plans(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_alter_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_validate_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_drop_resource_plan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
  public:
   ThriftHiveMetastoreProcessor(boost::shared_ptr<ThriftHiveMetastoreIf> iface) :
     ::facebook::fb303::FacebookServiceProcessor(iface),
@@ -21512,6 +22243,12 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
     processMap_["clear_file_metadata"] = &ThriftHiveMetastoreProcessor::process_clear_file_metadata;
     processMap_["cache_file_metadata"] = &ThriftHiveMetastoreProcessor::process_cache_file_metadata;
     processMap_["get_metastore_db_uuid"] = &ThriftHiveMetastoreProcessor::process_get_metastore_db_uuid;
+    processMap_["create_resource_plan"] = &ThriftHiveMetastoreProcessor::process_create_resource_plan;
+    processMap_["get_resource_plan"] = &ThriftHiveMetastoreProcessor::process_get_resource_plan;
+    processMap_["get_all_resource_plans"] = &ThriftHiveMetastoreProcessor::process_get_all_resource_plans;
+    processMap_["alter_resource_plan"] = &ThriftHiveMetastoreProcessor::process_alter_resource_plan;
+    processMap_["validate_resource_plan"] = &ThriftHiveMetastoreProcessor::process_validate_resource_plan;
+    processMap_["drop_resource_plan"] = &ThriftHiveMetastoreProcessor::process_drop_resource_plan;
   }
 
   virtual ~ThriftHiveMetastoreProcessor() {}
@@ -23099,6 +23836,62 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
     return;
   }
 
+  void create_resource_plan(const WMResourcePlan& resourcePlan) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->create_resource_plan(resourcePlan);
+    }
+    ifaces_[i]->create_resource_plan(resourcePlan);
+  }
+
+  void get_resource_plan(WMResourcePlan& _return, const std::string& name) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->get_resource_plan(_return, name);
+    }
+    ifaces_[i]->get_resource_plan(_return, name);
+    return;
+  }
+
+  void get_all_resource_plans(std::vector<WMResourcePlan> & _return) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->get_all_resource_plans(_return);
+    }
+    ifaces_[i]->get_all_resource_plans(_return);
+    return;
+  }
+
+  void alter_resource_plan(const std::string& name, const WMResourcePlan& resourcePlan) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->alter_resource_plan(name, resourcePlan);
+    }
+    ifaces_[i]->alter_resource_plan(name, resourcePlan);
+  }
+
+  void validate_resource_plan(const std::string& name) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->validate_resource_plan(name);
+    }
+    ifaces_[i]->validate_resource_plan(name);
+  }
+
+  void drop_resource_plan(const std::string& name) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->drop_resource_plan(name);
+    }
+    ifaces_[i]->drop_resource_plan(name);
+  }
+
 };
 
 // The 'concurrent' client is a thread safe client that correctly handles
@@ -23601,6 +24394,24 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
   void get_metastore_db_uuid(std::string& _return);
   int32_t send_get_metastore_db_uuid();
   void recv_get_metastore_db_uuid(std::string& _return, const int32_t seqid);
+  void create_resource_plan(const WMResourcePlan& resourcePlan);
+  int32_t send_create_resource_plan(const WMResourcePlan& resourcePlan);
+  void recv_create_resource_plan(const int32_t seqid);
+  void get_resource_plan(WMResourcePlan& _return, const std::string& name);
+  int32_t send_get_resource_plan(const std::string& name);
+  void recv_get_resource_plan(WMResourcePlan& _return, const int32_t seqid);
+  void get_all_resource_plans(std::vector<WMResourcePlan> & _return);
+  int32_t send_get_all_resource_plans();
+  void recv_get_all_resource_plans(std::vector<WMResourcePlan> & _return, const int32_t seqid);
+  void alter_resource_plan(const std::string& name, const WMResourcePlan& resourcePlan);
+  int32_t send_alter_resource_plan(const std::string& name, const WMResourcePlan& resourcePlan);
+  void recv_alter_resource_plan(const int32_t seqid);
+  void validate_resource_plan(const std::string& name);
+  int32_t send_validate_resource_plan(const std::string& name);
+  void recv_validate_resource_plan(const int32_t seqid);
+  void drop_resource_plan(const std::string& name);
+  int32_t send_drop_resource_plan(const std::string& name);
+  void recv_drop_resource_plan(const int32_t seqid);
 };
 
 #ifdef _WIN32
diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index 78efd63205..662e11f10a 100644
--- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -832,6 +832,36 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
     printf("get_metastore_db_uuid\n");
   }
 
+  void create_resource_plan(const WMResourcePlan& resourcePlan) {
+    // Your implementation goes here
+    printf("create_resource_plan\n");
+  }
+
+  void get_resource_plan(WMResourcePlan& _return, const std::string& name) {
+    // Your implementation goes here
+    printf("get_resource_plan\n");
+  }
+
+  void get_all_resource_plans(std::vector<WMResourcePlan> & _return) {
+    // Your implementation goes here
+    printf("get_all_resource_plans\n");
+  }
+
+  void alter_resource_plan(const std::string& name, const WMResourcePlan& resourcePlan) {
+    // Your implementation goes here
+    printf("alter_resource_plan\n");
+  }
+
+  void validate_resource_plan(const std::string& name) {
+    // Your implementation goes here
+    printf("validate_resource_plan\n");
+  }
+
+  void drop_resource_plan(const std::string& name) {
+    // Your implementation goes here
+    printf("drop_resource_plan\n");
+  }
+
 };
 
 int main(int argc, char **argv) {
diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 5ea5fc4ad2..0b8a18ca07 100644
--- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -366,6 +366,18 @@
     public String get_metastore_db_uuid() throws MetaException, org.apache.thrift.TException;
 
+    public void create_resource_plan(WMResourcePlan resourcePlan) throws InvalidObjectException, MetaException, org.apache.thrift.TException;
+
+    public WMResourcePlan get_resource_plan(String name) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
+    public List<WMResourcePlan> get_all_resource_plans() throws MetaException, org.apache.thrift.TException;
+
+    public void alter_resource_plan(String name, WMResourcePlan resourcePlan) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+
+    public void validate_resource_plan(String name) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
+    public void drop_resource_plan(String name) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
   }
 
   public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIface {
@@ -694,6 +706,18 @@
     public void get_metastore_db_uuid(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
+    public void create_resource_plan(WMResourcePlan resourcePlan, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void get_resource_plan(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void get_all_resource_plans(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void alter_resource_plan(String name, WMResourcePlan resourcePlan, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void validate_resource_plan(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void drop_resource_plan(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
   }
 
   public static class Client extends com.facebook.fb303.FacebookService.Client implements Iface {
@@ -5361,6 +5385,168 @@ public String recv_get_metastore_db_uuid() throws MetaException, org.apache.thri
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_metastore_db_uuid failed: unknown result");
     }
 
+    public void create_resource_plan(WMResourcePlan resourcePlan) throws InvalidObjectException, MetaException, org.apache.thrift.TException
+    {
+      send_create_resource_plan(resourcePlan);
+      recv_create_resource_plan();
+    }
+
+    public void send_create_resource_plan(WMResourcePlan resourcePlan) throws org.apache.thrift.TException
+    {
+      create_resource_plan_args args = new create_resource_plan_args();
+      args.setResourcePlan(resourcePlan);
+      sendBase("create_resource_plan", args);
+    }
+
+    public void recv_create_resource_plan() throws InvalidObjectException, MetaException, org.apache.thrift.TException
+    {
+      create_resource_plan_result result = new create_resource_plan_result();
+      receiveBase(result, "create_resource_plan");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      return;
+    }
+
+    public WMResourcePlan get_resource_plan(String name) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      send_get_resource_plan(name);
+      return recv_get_resource_plan();
+    }
+
+    public void send_get_resource_plan(String name) throws org.apache.thrift.TException
+    {
+      get_resource_plan_args args = new get_resource_plan_args();
+      args.setName(name);
+      sendBase("get_resource_plan", args);
+    }
+
+    public WMResourcePlan recv_get_resource_plan() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      get_resource_plan_result result = new get_resource_plan_result();
+      receiveBase(result, "get_resource_plan");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_resource_plan failed: unknown result");
+    }
+
+    public List<WMResourcePlan> get_all_resource_plans() throws MetaException, org.apache.thrift.TException
+    {
+      send_get_all_resource_plans();
+      return recv_get_all_resource_plans();
+    }
+
+    public void send_get_all_resource_plans() throws org.apache.thrift.TException
+    {
+      get_all_resource_plans_args args = new get_all_resource_plans_args();
+      sendBase("get_all_resource_plans", args);
+    }
+
+    public List<WMResourcePlan> recv_get_all_resource_plans() throws MetaException, org.apache.thrift.TException
+    {
+      get_all_resource_plans_result result = new get_all_resource_plans_result();
+      receiveBase(result, "get_all_resource_plans");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_all_resource_plans failed: unknown result");
+    }
+
+    public void alter_resource_plan(String name, WMResourcePlan resourcePlan) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+    {
+      send_alter_resource_plan(name, resourcePlan);
+      recv_alter_resource_plan();
+    }
+
+    public void send_alter_resource_plan(String name, WMResourcePlan resourcePlan) throws org.apache.thrift.TException
+    {
+      alter_resource_plan_args args = new alter_resource_plan_args();
+      args.setName(name);
+      args.setResourcePlan(resourcePlan);
+      sendBase("alter_resource_plan", args);
+    }
+
+    public void recv_alter_resource_plan() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+    {
+      alter_resource_plan_result result = new alter_resource_plan_result();
+      receiveBase(result, "alter_resource_plan");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o3 != null) {
+        throw result.o3;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      return;
+    }
+
+    public void validate_resource_plan(String name) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      send_validate_resource_plan(name);
+      recv_validate_resource_plan();
+    }
+
+    public void send_validate_resource_plan(String name) throws org.apache.thrift.TException
+    {
+      validate_resource_plan_args args = new validate_resource_plan_args();
+      args.setName(name);
+      sendBase("validate_resource_plan", args);
+    }
+
+    public void recv_validate_resource_plan() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      validate_resource_plan_result result = new validate_resource_plan_result();
+      receiveBase(result, "validate_resource_plan");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o3 != null) {
+        throw result.o3;
+      }
+      return;
+    }
+
+    public void drop_resource_plan(String name) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      send_drop_resource_plan(name);
+      recv_drop_resource_plan();
+    }
+
+    public void send_drop_resource_plan(String name) throws org.apache.thrift.TException
+    {
+      drop_resource_plan_args args = new drop_resource_plan_args();
+      args.setName(name);
+      sendBase("drop_resource_plan", args);
+    }
+
+    public void recv_drop_resource_plan() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      drop_resource_plan_result result = new drop_resource_plan_result();
+      receiveBase(result, "drop_resource_plan");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      return;
+    }
+
   }
 
   public static class AsyncClient extends com.facebook.fb303.FacebookService.AsyncClient implements AsyncIface {
     public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
@@ -11061,6 +11247,198 @@ public String getResult() throws MetaException, org.apache.thrift.TException {
       }
     }
 
+    public void create_resource_plan(WMResourcePlan resourcePlan, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      create_resource_plan_call method_call = new create_resource_plan_call(resourcePlan, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class create_resource_plan_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private WMResourcePlan resourcePlan;
+      public create_resource_plan_call(WMResourcePlan resourcePlan, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.resourcePlan = resourcePlan;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("create_resource_plan", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        create_resource_plan_args args = new create_resource_plan_args();
+        args.setResourcePlan(resourcePlan);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws InvalidObjectException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_create_resource_plan();
+      }
+    }
+
+    public void get_resource_plan(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_resource_plan_call method_call = new get_resource_plan_call(name, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class get_resource_plan_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String name;
+      public get_resource_plan_call(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.name = name;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_resource_plan", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        get_resource_plan_args args = new get_resource_plan_args();
+        args.setName(name);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public WMResourcePlan getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_get_resource_plan();
+      }
+    }
+
+    public void get_all_resource_plans(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_all_resource_plans_call method_call = new get_all_resource_plans_call(resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class get_all_resource_plans_call extends org.apache.thrift.async.TAsyncMethodCall {
+      public get_all_resource_plans_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_all_resource_plans", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        get_all_resource_plans_args args = new get_all_resource_plans_args();
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public List<WMResourcePlan> getResult() throws MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_get_all_resource_plans();
+      }
+    }
+
+    public void alter_resource_plan(String name, WMResourcePlan resourcePlan, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      alter_resource_plan_call method_call = new alter_resource_plan_call(name, resourcePlan, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class alter_resource_plan_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String name;
+      private WMResourcePlan resourcePlan;
+      public alter_resource_plan_call(String name, WMResourcePlan resourcePlan, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.name = name;
+        this.resourcePlan = resourcePlan;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("alter_resource_plan", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        alter_resource_plan_args args = new alter_resource_plan_args();
+        args.setName(name);
+        args.setResourcePlan(resourcePlan);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new 
org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_alter_resource_plan(); + } + } + + public void validate_resource_plan(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + validate_resource_plan_call method_call = new validate_resource_plan_call(name, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class validate_resource_plan_call extends org.apache.thrift.async.TAsyncMethodCall { + private String name; + public validate_resource_plan_call(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.name = name; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("validate_resource_plan", org.apache.thrift.protocol.TMessageType.CALL, 0)); + validate_resource_plan_args args = new validate_resource_plan_args(); + args.setName(name); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_validate_resource_plan(); + } + } + + public void drop_resource_plan(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + drop_resource_plan_call method_call = new drop_resource_plan_call(name, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class drop_resource_plan_call extends org.apache.thrift.async.TAsyncMethodCall { + private String name; + public drop_resource_plan_call(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.name = name; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("drop_resource_plan", org.apache.thrift.protocol.TMessageType.CALL, 0)); + drop_resource_plan_args args = new drop_resource_plan_args(); + args.setName(name); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException { + if (getState() != 
org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+        throw new IllegalStateException("Method call not finished!");
+      }
+      org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+      org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+      (new Client(prot)).recv_drop_resource_plan();
+    }
+  }
+
 }

 public static class Processor<I extends Iface> extends com.facebook.fb303.FacebookService.Processor<I> implements org.apache.thrift.TProcessor {
@@ -11236,6 +11614,12 @@ protected Processor(I iface, Map
+  public static class create_resource_plan<I extends Iface> extends org.apache.thrift.ProcessFunction<I, create_resource_plan_args> {
+    public create_resource_plan() {
+      super("create_resource_plan");
+    }
+
+    public create_resource_plan_args getEmptyArgsInstance() {
+      return new create_resource_plan_args();
+    }
+
+    protected boolean isOneway() {
+      return false;
+    }
+
+    public create_resource_plan_result getResult(I iface, create_resource_plan_args args) throws org.apache.thrift.TException {
+      create_resource_plan_result result = new create_resource_plan_result();
+      try {
+        iface.create_resource_plan(args.resourcePlan);
+      } catch (InvalidObjectException o1) {
+        result.o1 = o1;
+      } catch (MetaException o2) {
+        result.o2 = o2;
+      }
+      return result;
+    }
+  }
+
+  public static class get_resource_plan<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_resource_plan_args> {
+    public get_resource_plan() {
+      super("get_resource_plan");
+    }
+
+    public get_resource_plan_args getEmptyArgsInstance() {
+      return new get_resource_plan_args();
+    }
+
+    protected boolean isOneway() {
+      return false;
+    }
+
+    public get_resource_plan_result getResult(I iface, get_resource_plan_args args) throws org.apache.thrift.TException {
+      get_resource_plan_result result = new get_resource_plan_result();
+      try {
+        result.success = iface.get_resource_plan(args.name);
+      } catch (NoSuchObjectException o1) {
+        result.o1 = o1;
+      } catch (MetaException o2) {
+        result.o2 = o2;
+      }
+      return result;
+    }
+  }
+
+  public static class get_all_resource_plans<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_all_resource_plans_args> {
+    public get_all_resource_plans() {
+      super("get_all_resource_plans");
+    }
+
+    public get_all_resource_plans_args getEmptyArgsInstance() {
+      return new get_all_resource_plans_args();
+    }
+
+    protected boolean isOneway() {
+      return false;
+    }
+
+    public get_all_resource_plans_result getResult(I iface, get_all_resource_plans_args args) throws org.apache.thrift.TException {
+      get_all_resource_plans_result result = new get_all_resource_plans_result();
+      try {
+        result.success = iface.get_all_resource_plans();
+      } catch (MetaException o1) {
+        result.o1 = o1;
+      }
+      return result;
+    }
+  }
+
+  public static class alter_resource_plan<I extends Iface> extends org.apache.thrift.ProcessFunction<I, alter_resource_plan_args> {
+    public alter_resource_plan() {
+      super("alter_resource_plan");
+    }
+
+    public alter_resource_plan_args getEmptyArgsInstance() {
+      return new alter_resource_plan_args();
+    }
+
+    protected boolean isOneway() {
+      return false;
+    }
+
+    public alter_resource_plan_result getResult(I iface, alter_resource_plan_args args) throws org.apache.thrift.TException {
+      alter_resource_plan_result result = new alter_resource_plan_result();
+      try {
+        iface.alter_resource_plan(args.name, args.resourcePlan);
+      } catch (NoSuchObjectException o1) {
+        result.o1 = o1;
+      } catch (InvalidOperationException o3) {
+        result.o3 = o3;
+      } catch (MetaException o2) {
+        result.o2 = o2;
+      }
+      return result;
+    }
+  }
+
+  public static class validate_resource_plan<I extends Iface> extends org.apache.thrift.ProcessFunction<I, validate_resource_plan_args> {
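+    // Server-side dispatch pattern shared by all six resource-plan ProcessFunctions:
+    // the Processor deserializes a fresh _args struct (getEmptyArgsInstance), invokes
+    // the matching Iface method, and folds each declared exception into its field of
+    // the _result struct, so the peer receives an ordinary REPLY it can rethrow
+    // client-side rather than a TApplicationException. isOneway() is false, so every
+    // invocation, successful or not, produces a response frame.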
public validate_resource_plan() { + super("validate_resource_plan"); + } + + public validate_resource_plan_args getEmptyArgsInstance() { + return new validate_resource_plan_args(); + } + + protected boolean isOneway() { + return false; + } + + public validate_resource_plan_result getResult(I iface, validate_resource_plan_args args) throws org.apache.thrift.TException { + validate_resource_plan_result result = new validate_resource_plan_result(); + try { + iface.validate_resource_plan(args.name); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (MetaException o3) { + result.o3 = o3; + } + return result; + } + } + + public static class drop_resource_plan extends org.apache.thrift.ProcessFunction { + public drop_resource_plan() { + super("drop_resource_plan"); + } + + public drop_resource_plan_args getEmptyArgsInstance() { + return new drop_resource_plan_args(); + } + + protected boolean isOneway() { + return false; + } + + public drop_resource_plan_result getResult(I iface, drop_resource_plan_args args) throws org.apache.thrift.TException { + drop_resource_plan_result result = new drop_resource_plan_result(); + try { + iface.drop_resource_plan(args.name); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (MetaException o2) { + result.o2 = o2; + } + return result; + } + } + } public static class AsyncProcessor extends com.facebook.fb303.FacebookService.AsyncProcessor { @@ -15534,6 +16074,12 @@ protected AsyncProcessor(I iface, Map, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_args"); - - private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new getMetaConf_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new getMetaConf_argsTupleSchemeFactory()); - } - - private String key; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - KEY((short)1, "key"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } + public static class create_resource_plan extends org.apache.thrift.AsyncProcessFunction { + public create_resource_plan() { + super("create_resource_plan"); } - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // KEY - return KEY; - default: - return null; - } + public create_resource_plan_args getEmptyArgsInstance() { + return new create_resource_plan_args(); } - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + create_resource_plan_result result = new create_resource_plan_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + create_resource_plan_result result = new create_resource_plan_result(); + if (e instanceof InvalidObjectException) { + result.o1 = (InvalidObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; } - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); + protected boolean isOneway() { + return false; } - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; + public void start(I iface, create_resource_plan_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.create_resource_plan(args.resourcePlan,resultHandler); } + } - public short getThriftFieldId() { - return _thriftId; + public static class get_resource_plan extends org.apache.thrift.AsyncProcessFunction { + public get_resource_plan() { + super("get_resource_plan"); } - public String getFieldName() { - return _fieldName; + public get_resource_plan_args getEmptyArgsInstance() { + return new get_resource_plan_args(); } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getMetaConf_args.class, metaDataMap); - } - - public getMetaConf_args() { - } - public getMetaConf_args( - String key) - { - this(); - this.key = key; - } - - /** - * Performs a deep copy on other. 
- */ - public getMetaConf_args(getMetaConf_args other) { - if (other.isSetKey()) { - this.key = other.key; + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(WMResourcePlan o) { + get_resource_plan_result result = new get_resource_plan_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_resource_plan_result result = new get_resource_plan_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; } - } - - public getMetaConf_args deepCopy() { - return new getMetaConf_args(this); - } - - @Override - public void clear() { - this.key = null; - } - - public String getKey() { - return this.key; - } - - public void setKey(String key) { - this.key = key; - } - - public void unsetKey() { - this.key = null; - } - - /** Returns true if field key is set (has been assigned a value) and false otherwise */ - public boolean isSetKey() { - return this.key != null; - } - public void setKeyIsSet(boolean value) { - if (!value) { - this.key = null; + protected boolean isOneway() { + return false; } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case KEY: - if (value == null) { - unsetKey(); - } else { - setKey((String)value); - } - break; + public void start(I iface, get_resource_plan_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_resource_plan(args.name,resultHandler); } } - public Object getFieldValue(_Fields field) { - switch (field) { - case KEY: - return getKey(); - + public static class get_all_resource_plans extends org.apache.thrift.AsyncProcessFunction> { + public get_all_resource_plans() { + super("get_all_resource_plans"); } - throw new IllegalStateException(); - } - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); + public get_all_resource_plans_args getEmptyArgsInstance() { + return new get_all_resource_plans_args(); } - switch (field) { - case KEY: - return isSetKey(); + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_all_resource_plans_result result = new get_all_resource_plans_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, 
org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_all_resource_plans_result result = new get_all_resource_plans_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; } - throw new IllegalStateException(); - } - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof getMetaConf_args) - return this.equals((getMetaConf_args)that); - return false; - } - - public boolean equals(getMetaConf_args that) { - if (that == null) + protected boolean isOneway() { return false; - - boolean this_present_key = true && this.isSetKey(); - boolean that_present_key = true && that.isSetKey(); - if (this_present_key || that_present_key) { - if (!(this_present_key && that_present_key)) - return false; - if (!this.key.equals(that.key)) - return false; } - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_key = true && (isSetKey()); - list.add(present_key); - if (present_key) - list.add(key); - - return list.hashCode(); + public void start(I iface, get_all_resource_plans_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_all_resource_plans(resultHandler); + } } - @Override - public int compareTo(getMetaConf_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); + public static class alter_resource_plan extends org.apache.thrift.AsyncProcessFunction { + public alter_resource_plan() { + super("alter_resource_plan"); } - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); - if (lastComparison != 0) { - return lastComparison; + public alter_resource_plan_args getEmptyArgsInstance() { + return new alter_resource_plan_args(); } - if (isSetKey()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - @Override - public String toString() { - StringBuilder sb = new StringBuilder("getMetaConf_args("); - boolean first = true; - - sb.append("key:"); - if (this.key == null) { - sb.append("null"); - } else { - sb.append(this.key); + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + 
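+        // Async mirror of the synchronous ProcessFunction's try/catch above: onComplete
+        // serializes an empty alter_resource_plan_result as a REPLY, while onError maps
+        // each declared exception (o1/o3/o2) onto the _result struct, still a REPLY;
+        // anything unrecognized degrades to a TApplicationException INTERNAL_ERROR
+        // frame. If even the response cannot be written, the frame buffer is closed to
+        // drop the connection rather than leave the client waiting.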
return new AsyncMethodCallback() { + public void onComplete(Void o) { + alter_resource_plan_result result = new alter_resource_plan_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + alter_resource_plan_result result = new alter_resource_plan_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof InvalidOperationException) { + result.o3 = (InvalidOperationException) e; + result.setO3IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); + protected boolean isOneway() { + return false; } - } - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); + public void start(I iface, alter_resource_plan_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.alter_resource_plan(args.name, args.resourcePlan,resultHandler); } } - private static class getMetaConf_argsStandardSchemeFactory implements SchemeFactory { - public getMetaConf_argsStandardScheme getScheme() { - return new getMetaConf_argsStandardScheme(); + public static class validate_resource_plan extends org.apache.thrift.AsyncProcessFunction { + public validate_resource_plan() { + super("validate_resource_plan"); } - } - private static class getMetaConf_argsStandardScheme extends StandardScheme { + public validate_resource_plan_args getEmptyArgsInstance() { + return new validate_resource_plan_args(); + } - public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void 
o) { + validate_resource_plan_result result = new validate_resource_plan_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); } - switch (schemeField.id) { - case 1: // KEY - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.key = iprot.readString(); - struct.setKeyIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + validate_resource_plan_result result = new validate_resource_plan_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; + result.setO3IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); + }; } - public void write(org.apache.thrift.protocol.TProtocol oprot, getMetaConf_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.key != null) { - oprot.writeFieldBegin(KEY_FIELD_DESC); - oprot.writeString(struct.key); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); + protected boolean isOneway() { + return false; } + public void start(I iface, validate_resource_plan_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.validate_resource_plan(args.name,resultHandler); + } } - private static class getMetaConf_argsTupleSchemeFactory implements SchemeFactory { - public getMetaConf_argsTupleScheme getScheme() { - return new getMetaConf_argsTupleScheme(); + public static class drop_resource_plan extends org.apache.thrift.AsyncProcessFunction { + public drop_resource_plan() { + super("drop_resource_plan"); } - } - private static class getMetaConf_argsTupleScheme extends TupleScheme { + public drop_resource_plan_args getEmptyArgsInstance() { + return new drop_resource_plan_args(); + } - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetKey()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetKey()) { - oprot.writeString(struct.key); - } + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + drop_resource_plan_result result = new drop_resource_plan_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + 
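+            // sendResponse failed mid-write, so the frame state is unknown; log and
+            // close the buffer instead of attempting a second reply on the same frame.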
LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + drop_resource_plan_result result = new drop_resource_plan_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; } - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.key = iprot.readString(); - struct.setKeyIsSet(true); - } + protected boolean isOneway() { + return false; + } + + public void start(I iface, drop_resource_plan_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.drop_resource_plan(args.name,resultHandler); } } } - public static class getMetaConf_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_result"); + public static class getMetaConf_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_args"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); - private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new getMetaConf_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new getMetaConf_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new getMetaConf_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getMetaConf_argsTupleSchemeFactory()); } - private String success; // required - private MetaException o1; // required + private String key; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"), - O1((short)1, "o1"); + KEY((short)1, "key"); private static final Map byName = new HashMap(); @@ -25764,10 +26317,371 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args str */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - case 1: // O1 - return O1; + case 1: // KEY + return KEY; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getMetaConf_args.class, metaDataMap); + } + + public getMetaConf_args() { + } + + public getMetaConf_args( + String key) + { + this(); + this.key = key; + } + + /** + * Performs a deep copy on other. 
+ */ + public getMetaConf_args(getMetaConf_args other) { + if (other.isSetKey()) { + this.key = other.key; + } + } + + public getMetaConf_args deepCopy() { + return new getMetaConf_args(this); + } + + @Override + public void clear() { + this.key = null; + } + + public String getKey() { + return this.key; + } + + public void setKey(String key) { + this.key = key; + } + + public void unsetKey() { + this.key = null; + } + + /** Returns true if field key is set (has been assigned a value) and false otherwise */ + public boolean isSetKey() { + return this.key != null; + } + + public void setKeyIsSet(boolean value) { + if (!value) { + this.key = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case KEY: + if (value == null) { + unsetKey(); + } else { + setKey((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case KEY: + return getKey(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case KEY: + return isSetKey(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof getMetaConf_args) + return this.equals((getMetaConf_args)that); + return false; + } + + public boolean equals(getMetaConf_args that) { + if (that == null) + return false; + + boolean this_present_key = true && this.isSetKey(); + boolean that_present_key = true && that.isSetKey(); + if (this_present_key || that_present_key) { + if (!(this_present_key && that_present_key)) + return false; + if (!this.key.equals(that.key)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_key = true && (isSetKey()); + list.add(present_key); + if (present_key) + list.add(key); + + return list.hashCode(); + } + + @Override + public int compareTo(getMetaConf_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetKey()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("getMetaConf_args("); + boolean first = true; + + sb.append("key:"); + if (this.key == null) { + sb.append("null"); + } else { + sb.append(this.key); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream 
out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getMetaConf_argsStandardSchemeFactory implements SchemeFactory { + public getMetaConf_argsStandardScheme getScheme() { + return new getMetaConf_argsStandardScheme(); + } + } + + private static class getMetaConf_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // KEY + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.key = iprot.readString(); + struct.setKeyIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, getMetaConf_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.key != null) { + oprot.writeFieldBegin(KEY_FIELD_DESC); + oprot.writeString(struct.key); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getMetaConf_argsTupleSchemeFactory implements SchemeFactory { + public getMetaConf_argsTupleScheme getScheme() { + return new getMetaConf_argsTupleScheme(); + } + } + + private static class getMetaConf_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetKey()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetKey()) { + oprot.writeString(struct.key); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.key = iprot.readString(); + struct.setKeyIsSet(true); + } + } + } + + } + + public static class getMetaConf_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", 
org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new getMetaConf_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getMetaConf_resultTupleSchemeFactory()); + } + + private String success; // required + private MetaException o1; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; default: return null; } @@ -189411,4 +190325,5217 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_metastore_db_uui } + public static class create_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_resource_plan_args"); + + private static final org.apache.thrift.protocol.TField RESOURCE_PLAN_FIELD_DESC = new org.apache.thrift.protocol.TField("resourcePlan", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new create_resource_plan_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new create_resource_plan_argsTupleSchemeFactory()); + } + + private WMResourcePlan resourcePlan; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RESOURCE_PLAN((short)1, "resourcePlan"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RESOURCE_PLAN + return RESOURCE_PLAN; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RESOURCE_PLAN, new org.apache.thrift.meta_data.FieldMetaData("resourcePlan", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMResourcePlan.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_resource_plan_args.class, metaDataMap); + } + + public create_resource_plan_args() { + } + + public create_resource_plan_args( + WMResourcePlan resourcePlan) + { + this(); + this.resourcePlan = resourcePlan; + } + + /** + * Performs a deep copy on other. + */ + public create_resource_plan_args(create_resource_plan_args other) { + if (other.isSetResourcePlan()) { + this.resourcePlan = new WMResourcePlan(other.resourcePlan); + } + } + + public create_resource_plan_args deepCopy() { + return new create_resource_plan_args(this); + } + + @Override + public void clear() { + this.resourcePlan = null; + } + + public WMResourcePlan getResourcePlan() { + return this.resourcePlan; + } + + public void setResourcePlan(WMResourcePlan resourcePlan) { + this.resourcePlan = resourcePlan; + } + + public void unsetResourcePlan() { + this.resourcePlan = null; + } + + /** Returns true if field resourcePlan is set (has been assigned a value) and false otherwise */ + public boolean isSetResourcePlan() { + return this.resourcePlan != null; + } + + public void setResourcePlanIsSet(boolean value) { + if (!value) { + this.resourcePlan = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RESOURCE_PLAN: + if (value == null) { + unsetResourcePlan(); + } else { + setResourcePlan((WMResourcePlan)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RESOURCE_PLAN: + return getResourcePlan(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case RESOURCE_PLAN: + return isSetResourcePlan(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof create_resource_plan_args) + return this.equals((create_resource_plan_args)that); + return false; + } + + public boolean equals(create_resource_plan_args that) { + if (that == null) + return false; + + boolean this_present_resourcePlan = true && this.isSetResourcePlan(); + boolean that_present_resourcePlan = true && that.isSetResourcePlan(); + if (this_present_resourcePlan || that_present_resourcePlan) { + if (!(this_present_resourcePlan && that_present_resourcePlan)) + return false; + if 
(!this.resourcePlan.equals(that.resourcePlan)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_resourcePlan = true && (isSetResourcePlan()); + list.add(present_resourcePlan); + if (present_resourcePlan) + list.add(resourcePlan); + + return list.hashCode(); + } + + @Override + public int compareTo(create_resource_plan_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetResourcePlan()).compareTo(other.isSetResourcePlan()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetResourcePlan()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.resourcePlan, other.resourcePlan); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("create_resource_plan_args("); + boolean first = true; + + sb.append("resourcePlan:"); + if (this.resourcePlan == null) { + sb.append("null"); + } else { + sb.append(this.resourcePlan); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (resourcePlan != null) { + resourcePlan.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class create_resource_plan_argsStandardSchemeFactory implements SchemeFactory { + public create_resource_plan_argsStandardScheme getScheme() { + return new create_resource_plan_argsStandardScheme(); + } + } + + private static class create_resource_plan_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, create_resource_plan_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RESOURCE_PLAN + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.resourcePlan = new WMResourcePlan(); + struct.resourcePlan.read(iprot); + struct.setResourcePlanIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + 
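+              // Unknown field ids are skipped wholesale; this is what lets an older
+              // reader tolerate structs written by a newer schema revision.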
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, create_resource_plan_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.resourcePlan != null) { + oprot.writeFieldBegin(RESOURCE_PLAN_FIELD_DESC); + struct.resourcePlan.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class create_resource_plan_argsTupleSchemeFactory implements SchemeFactory { + public create_resource_plan_argsTupleScheme getScheme() { + return new create_resource_plan_argsTupleScheme(); + } + } + + private static class create_resource_plan_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, create_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetResourcePlan()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetResourcePlan()) { + struct.resourcePlan.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, create_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.resourcePlan = new WMResourcePlan(); + struct.resourcePlan.read(iprot); + struct.setResourcePlanIsSet(true); + } + } + } + + } + + public static class create_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_resource_plan_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new create_resource_plan_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new create_resource_plan_resultTupleSchemeFactory()); + } + + private InvalidObjectException o1; // required + private MetaException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
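The create_resource_plan_args/create_resource_plan_result pair above is the generated wire format for the new create_resource_plan call; application code goes through the IMetaStoreClient wrapper added in this patch rather than touching these structs directly. A minimal caller sketch (assuming a reachable metastore configured through hive-site.xml, and assuming WMResourcePlan carries its name in a setName-style field as the struct definition suggests):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.WMResourcePlan;

    public class CreatePlanExample {
      public static void main(String[] args) throws Exception {
        // Assumes hive-site.xml with metastore connection details is on the classpath.
        HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        try {
          WMResourcePlan plan = new WMResourcePlan();
          plan.setName("daily_etl"); // assumed setter; see the WMResourcePlan struct in this patch
          client.createResourcePlan(plan); // new API from this patch
        } finally {
          client.close();
        }
      }
    }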
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_resource_plan_result.class, metaDataMap); + } + + public create_resource_plan_result() { + } + + public create_resource_plan_result( + InvalidObjectException o1, + MetaException o2) + { + this(); + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
+ */ + public create_resource_plan_result(create_resource_plan_result other) { + if (other.isSetO1()) { + this.o1 = new InvalidObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + } + + public create_resource_plan_result deepCopy() { + return new create_resource_plan_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o2 = null; + } + + public InvalidObjectException getO1() { + return this.o1; + } + + public void setO1(InvalidObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((InvalidObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof create_resource_plan_result) + return this.equals((create_resource_plan_result)that); + return false; + } + + public boolean equals(create_resource_plan_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(create_resource_plan_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if 
(lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("create_resource_plan_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class create_resource_plan_resultStandardSchemeFactory implements SchemeFactory { + public create_resource_plan_resultStandardScheme getScheme() { + return new create_resource_plan_resultStandardScheme(); + } + } + + private static class create_resource_plan_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, create_resource_plan_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new InvalidObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + 
iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, create_resource_plan_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class create_resource_plan_resultTupleSchemeFactory implements SchemeFactory { + public create_resource_plan_resultTupleScheme getScheme() { + return new create_resource_plan_resultTupleScheme(); + } + } + + private static class create_resource_plan_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, create_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + if (struct.isSetO2()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, create_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.o1 = new InvalidObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + public static class get_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_resource_plan_args"); + + private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_resource_plan_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_resource_plan_argsTupleSchemeFactory()); + } + + private String name; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + NAME((short)1, "name"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // NAME + return NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
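Both generated schemes for these structs follow the stock Thrift pattern: the StandardScheme writes tagged fields and skips unknown field ids on read (which keeps old readers compatible with newer writers), while the TupleScheme first writes a BitSet flagging which fields are present and then only the bodies of those fields. A standalone illustration of the bitset step, using plain java.util rather than the generated classes:

    import java.util.BitSet;

    public class BitSetEncodingDemo {
      public static void main(String[] args) {
        // Mirrors create_resource_plan_resultTupleScheme.write:
        // bit 0 <=> o1 is set, bit 1 <=> o2 is set; only set fields are serialized after the bitset.
        boolean o1Set = true;
        boolean o2Set = false;
        BitSet optionals = new BitSet();
        if (o1Set) optionals.set(0);
        if (o2Set) optionals.set(1);
        // writeBitSet(optionals, 2) then sends ceil(2/8) = 1 byte for these two flags.
        System.out.println("optionals = " + optionals); // prints {0}
      }
    }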
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_resource_plan_args.class, metaDataMap); + } + + public get_resource_plan_args() { + } + + public get_resource_plan_args( + String name) + { + this(); + this.name = name; + } + + /** + * Performs a deep copy on other. + */ + public get_resource_plan_args(get_resource_plan_args other) { + if (other.isSetName()) { + this.name = other.name; + } + } + + public get_resource_plan_args deepCopy() { + return new get_resource_plan_args(this); + } + + @Override + public void clear() { + this.name = null; + } + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + + public void unsetName() { + this.name = null; + } + + /** Returns true if field name is set (has been assigned a value) and false otherwise */ + public boolean isSetName() { + return this.name != null; + } + + public void setNameIsSet(boolean value) { + if (!value) { + this.name = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case NAME: + if (value == null) { + unsetName(); + } else { + setName((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case NAME: + return getName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case NAME: + return isSetName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_resource_plan_args) + return this.equals((get_resource_plan_args)that); + return false; + } + + public boolean equals(get_resource_plan_args that) { + if (that == null) + return false; + + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) + return false; + if (!this.name.equals(that.name)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List 
list = new ArrayList(); + + boolean present_name = true && (isSetName()); + list.add(present_name); + if (present_name) + list.add(name); + + return list.hashCode(); + } + + @Override + public int compareTo(get_resource_plan_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_resource_plan_args("); + boolean first = true; + + sb.append("name:"); + if (this.name == null) { + sb.append("null"); + } else { + sb.append(this.name); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_resource_plan_argsStandardSchemeFactory implements SchemeFactory { + public get_resource_plan_argsStandardScheme getScheme() { + return new get_resource_plan_argsStandardScheme(); + } + } + + private static class get_resource_plan_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_resource_plan_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_resource_plan_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.name != null) { + 
oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(struct.name); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_resource_plan_argsTupleSchemeFactory implements SchemeFactory { + public get_resource_plan_argsTupleScheme getScheme() { + return new get_resource_plan_argsTupleScheme(); + } + } + + private static class get_resource_plan_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetName()) { + oprot.writeString(struct.name); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } + } + } + + } + + public static class get_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_resource_plan_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_resource_plan_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_resource_plan_resultTupleSchemeFactory()); + } + + private WMResourcePlan success; // required + private NoSuchObjectException o1; // required + private MetaException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
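In get_resource_plan_result, field id 0 is reserved for the success value and ids 1 and 2 for the declared exceptions; on the client side a populated o1 or o2 is rethrown rather than returned, so callers see ordinary Java exceptions. A hypothetical caller, under the same configuration assumptions as the earlier sketch:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
    import org.apache.hadoop.hive.metastore.api.WMResourcePlan;

    public class GetPlanExample {
      public static void main(String[] args) throws Exception {
        HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        try {
          WMResourcePlan plan = client.getResourcePlan("daily_etl"); // new API from this patch
          System.out.println(plan); // generated structs provide a readable toString()
        } catch (NoSuchObjectException e) {
          // This is the o1 field of get_resource_plan_result, rethrown client-side.
          System.err.println("no such resource plan: " + e.getMessage());
        } finally {
          client.close();
        }
      }
    }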
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMResourcePlan.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_resource_plan_result.class, metaDataMap); + } + + public get_resource_plan_result() { + } + + public get_resource_plan_result( + WMResourcePlan success, + NoSuchObjectException o1, + MetaException o2) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_resource_plan_result(get_resource_plan_result other) { + if (other.isSetSuccess()) { + this.success = new WMResourcePlan(other.success); + } + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + } + + public get_resource_plan_result deepCopy() { + return new get_resource_plan_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + } + + public WMResourcePlan getSuccess() { + return this.success; + } + + public void setSuccess(WMResourcePlan success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((WMResourcePlan)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_resource_plan_result) + return this.equals((get_resource_plan_result)that); + return false; + } + + public boolean equals(get_resource_plan_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && 
that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(get_resource_plan_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_resource_plan_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new 
java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_resource_plan_resultStandardSchemeFactory implements SchemeFactory { + public get_resource_plan_resultStandardScheme getScheme() { + return new get_resource_plan_resultStandardScheme(); + } + } + + private static class get_resource_plan_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_resource_plan_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new WMResourcePlan(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_resource_plan_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_resource_plan_resultTupleSchemeFactory implements SchemeFactory { + public get_resource_plan_resultTupleScheme getScheme() { + return new get_resource_plan_resultTupleScheme(); + } + } + + private static class get_resource_plan_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if 
(struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.success = new WMResourcePlan(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + public static class get_all_resource_plans_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_all_resource_plans_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_all_resource_plans_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_all_resource_plans_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_all_resource_plans_args.class, metaDataMap); + } + + public get_all_resource_plans_args() { + } + + /** + * Performs a deep copy on other. 
+ */ + public get_all_resource_plans_args(get_all_resource_plans_args other) { + } + + public get_all_resource_plans_args deepCopy() { + return new get_all_resource_plans_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_all_resource_plans_args) + return this.equals((get_all_resource_plans_args)that); + return false; + } + + public boolean equals(get_all_resource_plans_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(get_all_resource_plans_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_all_resource_plans_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_all_resource_plans_argsStandardSchemeFactory implements SchemeFactory { + public get_all_resource_plans_argsStandardScheme getScheme() { + return new get_all_resource_plans_argsStandardScheme(); + } + } + + private static class get_all_resource_plans_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_resource_plans_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_resource_plans_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_all_resource_plans_argsTupleSchemeFactory implements SchemeFactory { + public get_all_resource_plans_argsTupleScheme getScheme() { + return new get_all_resource_plans_argsTupleScheme(); + } + } + + private static class get_all_resource_plans_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_all_resource_plans_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_all_resource_plans_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + public static class get_all_resource_plans_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_all_resource_plans_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_all_resource_plans_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_all_resource_plans_resultTupleSchemeFactory()); + } + + private List success; // required + private MetaException o1; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
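get_all_resource_plans takes no arguments, which is why the _Fields enum above is empty, and returns its plans in the result's success list. Through the client wrapper this surfaces as an ordinary List of WMResourcePlan; a minimal sketch under the same assumptions as above:

    import java.util.List;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.WMResourcePlan;

    public class ListPlansExample {
      public static void main(String[] args) throws Exception {
        HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        try {
          List<WMResourcePlan> plans = client.getAllResourcePlans(); // new API from this patch
          for (WMResourcePlan p : plans) {
            System.out.println(p);
          }
        } finally {
          client.close();
        }
      }
    }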
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMResourcePlan.class)))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_all_resource_plans_result.class, metaDataMap); + } + + public get_all_resource_plans_result() { + } + + public get_all_resource_plans_result( + List success, + MetaException o1) + { + this(); + this.success = success; + this.o1 = o1; + } + + /** + * Performs a deep copy on other. + */ + public get_all_resource_plans_result(get_all_resource_plans_result other) { + if (other.isSetSuccess()) { + List __this__success = new ArrayList(other.success.size()); + for (WMResourcePlan other_element : other.success) { + __this__success.add(new WMResourcePlan(other_element)); + } + this.success = __this__success; + } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + } + + public get_all_resource_plans_result deepCopy() { + return new get_all_resource_plans_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + } + + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
null : this.success.iterator(); + } + + public void addToSuccess(WMResourcePlan elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { + return this.success; + } + + public void setSuccess(List success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((List)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_all_resource_plans_result) + return this.equals((get_all_resource_plans_result)that); + return false; + } + + public boolean equals(get_all_resource_plans_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + return list.hashCode(); + } + + @Override + public int compareTo(get_all_resource_plans_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_all_resource_plans_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_all_resource_plans_resultStandardSchemeFactory implements SchemeFactory { + public get_all_resource_plans_resultStandardScheme getScheme() { + return new get_all_resource_plans_resultStandardScheme(); + } + } + + private static class get_all_resource_plans_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_resource_plans_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1286 = iprot.readListBegin(); + struct.success = new ArrayList(_list1286.size); + WMResourcePlan _elem1287; + for (int _i1288 = 0; _i1288 < _list1286.size; ++_i1288) + { + _elem1287 = new WMResourcePlan(); + _elem1287.read(iprot); + struct.success.add(_elem1287); + } + iprot.readListEnd(); + } + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + 
struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_resource_plans_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); + for (WMResourcePlan _iter1289 : struct.success) + { + _iter1289.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_all_resource_plans_resultTupleSchemeFactory implements SchemeFactory { + public get_all_resource_plans_resultTupleScheme getScheme() { + return new get_all_resource_plans_resultTupleScheme(); + } + } + + private static class get_all_resource_plans_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_all_resource_plans_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + { + oprot.writeI32(struct.success.size()); + for (WMResourcePlan _iter1290 : struct.success) + { + _iter1290.write(oprot); + } + } + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_all_resource_plans_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TList _list1291 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1291.size); + WMResourcePlan _elem1292; + for (int _i1293 = 0; _i1293 < _list1291.size; ++_i1293) + { + _elem1292 = new WMResourcePlan(); + _elem1292.read(iprot); + struct.success.add(_elem1292); + } + } + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + } + } + + } + + public static class alter_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_resource_plan_args"); + + private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField RESOURCE_PLAN_FIELD_DESC = new org.apache.thrift.protocol.TField("resourcePlan", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new 
alter_resource_plan_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_resource_plan_argsTupleSchemeFactory()); + } + + private String name; // required + private WMResourcePlan resourcePlan; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + NAME((short)1, "name"), + RESOURCE_PLAN((short)2, "resourcePlan"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // NAME + return NAME; + case 2: // RESOURCE_PLAN + return RESOURCE_PLAN; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.RESOURCE_PLAN, new org.apache.thrift.meta_data.FieldMetaData("resourcePlan", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WMResourcePlan.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_resource_plan_args.class, metaDataMap); + } + + public alter_resource_plan_args() { + } + + public alter_resource_plan_args( + String name, + WMResourcePlan resourcePlan) + { + this(); + this.name = name; + this.resourcePlan = resourcePlan; + } + + /** + * Performs a deep copy on other. 
+ */ + public alter_resource_plan_args(alter_resource_plan_args other) { + if (other.isSetName()) { + this.name = other.name; + } + if (other.isSetResourcePlan()) { + this.resourcePlan = new WMResourcePlan(other.resourcePlan); + } + } + + public alter_resource_plan_args deepCopy() { + return new alter_resource_plan_args(this); + } + + @Override + public void clear() { + this.name = null; + this.resourcePlan = null; + } + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + + public void unsetName() { + this.name = null; + } + + /** Returns true if field name is set (has been assigned a value) and false otherwise */ + public boolean isSetName() { + return this.name != null; + } + + public void setNameIsSet(boolean value) { + if (!value) { + this.name = null; + } + } + + public WMResourcePlan getResourcePlan() { + return this.resourcePlan; + } + + public void setResourcePlan(WMResourcePlan resourcePlan) { + this.resourcePlan = resourcePlan; + } + + public void unsetResourcePlan() { + this.resourcePlan = null; + } + + /** Returns true if field resourcePlan is set (has been assigned a value) and false otherwise */ + public boolean isSetResourcePlan() { + return this.resourcePlan != null; + } + + public void setResourcePlanIsSet(boolean value) { + if (!value) { + this.resourcePlan = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case NAME: + if (value == null) { + unsetName(); + } else { + setName((String)value); + } + break; + + case RESOURCE_PLAN: + if (value == null) { + unsetResourcePlan(); + } else { + setResourcePlan((WMResourcePlan)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case NAME: + return getName(); + + case RESOURCE_PLAN: + return getResourcePlan(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case NAME: + return isSetName(); + case RESOURCE_PLAN: + return isSetResourcePlan(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof alter_resource_plan_args) + return this.equals((alter_resource_plan_args)that); + return false; + } + + public boolean equals(alter_resource_plan_args that) { + if (that == null) + return false; + + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) + return false; + if (!this.name.equals(that.name)) + return false; + } + + boolean this_present_resourcePlan = true && this.isSetResourcePlan(); + boolean that_present_resourcePlan = true && that.isSetResourcePlan(); + if (this_present_resourcePlan || that_present_resourcePlan) { + if (!(this_present_resourcePlan && that_present_resourcePlan)) + return false; + if (!this.resourcePlan.equals(that.resourcePlan)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_name = true && (isSetName()); + list.add(present_name); + if (present_name) + list.add(name); + + boolean present_resourcePlan = true && (isSetResourcePlan()); + list.add(present_resourcePlan); + if 
(present_resourcePlan) + list.add(resourcePlan); + + return list.hashCode(); + } + + @Override + public int compareTo(alter_resource_plan_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetResourcePlan()).compareTo(other.isSetResourcePlan()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetResourcePlan()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.resourcePlan, other.resourcePlan); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("alter_resource_plan_args("); + boolean first = true; + + sb.append("name:"); + if (this.name == null) { + sb.append("null"); + } else { + sb.append(this.name); + } + first = false; + if (!first) sb.append(", "); + sb.append("resourcePlan:"); + if (this.resourcePlan == null) { + sb.append("null"); + } else { + sb.append(this.resourcePlan); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (resourcePlan != null) { + resourcePlan.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class alter_resource_plan_argsStandardSchemeFactory implements SchemeFactory { + public alter_resource_plan_argsStandardScheme getScheme() { + return new alter_resource_plan_argsStandardScheme(); + } + } + + private static class alter_resource_plan_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_resource_plan_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + 
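+              // Hedged aside: args structs like this one are filled on the client side
+              // and decoded here. Sketch of the caller-side flow, assuming a connected
+              // ThriftHiveMetastore.Client named `client` and a plan named "daily_etl"
+              // (both illustrative, not from this patch):
+              //
+              //   WMResourcePlan plan = new WMResourcePlan();
+              //   plan.setName("daily_etl");
+              //   client.create_resource_plan(plan);
+              //   client.alter_resource_plan("daily_etl", plan);  // packs alter_resource_plan_args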
} else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // RESOURCE_PLAN + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.resourcePlan = new WMResourcePlan(); + struct.resourcePlan.read(iprot); + struct.setResourcePlanIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_resource_plan_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.name != null) { + oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(struct.name); + oprot.writeFieldEnd(); + } + if (struct.resourcePlan != null) { + oprot.writeFieldBegin(RESOURCE_PLAN_FIELD_DESC); + struct.resourcePlan.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class alter_resource_plan_argsTupleSchemeFactory implements SchemeFactory { + public alter_resource_plan_argsTupleScheme getScheme() { + return new alter_resource_plan_argsTupleScheme(); + } + } + + private static class alter_resource_plan_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetName()) { + optionals.set(0); + } + if (struct.isSetResourcePlan()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetName()) { + oprot.writeString(struct.name); + } + if (struct.isSetResourcePlan()) { + struct.resourcePlan.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } + if (incoming.get(1)) { + struct.resourcePlan = new WMResourcePlan(); + struct.resourcePlan.read(iprot); + struct.setResourcePlanIsSet(true); + } + } + } + + } + + public static class alter_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_resource_plan_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new alter_resource_plan_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_resource_plan_resultTupleSchemeFactory()); + } + + private NoSuchObjectException o1; // required + 
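+    // Hedged note: alter_resource_plan is declared void, so this generated *_result
+    // struct has no success slot -- only exception fields. o1, o3 and o2 mirror the
+    // names in the Thrift throws clause (NoSuchObjectException,
+    // InvalidOperationException, MetaException); at most one is set on a reply, and
+    // the client stub re-throws whichever it finds set.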
private InvalidOperationException o3; // required + private MetaException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"), + O3((short)2, "o3"), + O2((short)3, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + case 2: // O3 + return O3; + case 3: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_resource_plan_result.class, metaDataMap); + } + + public alter_resource_plan_result() { + } + + public alter_resource_plan_result( + NoSuchObjectException o1, + InvalidOperationException o3, + MetaException o2) + { + this(); + this.o1 = o1; + this.o3 = o3; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
+ */ + public alter_resource_plan_result(alter_resource_plan_result other) { + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO3()) { + this.o3 = new InvalidOperationException(other.o3); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + } + + public alter_resource_plan_result deepCopy() { + return new alter_resource_plan_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o3 = null; + this.o2 = null; + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public InvalidOperationException getO3() { + return this.o3; + } + + public void setO3(InvalidOperationException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((InvalidOperationException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O3: + return getO3(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + case O3: + return isSetO3(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof alter_resource_plan_result) + return this.equals((alter_resource_plan_result)that); + return false; + } + + public boolean equals(alter_resource_plan_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; 
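+        // Hedged aside: this presence-flag pattern is how generated equals() copes
+        // with unset (null) fields -- a field matches only when it is unset on both
+        // sides, or set to equal values on both. Per field it boils down to:
+        //
+        //   boolean mine = isSetX(), theirs = that.isSetX();
+        //   if (mine || theirs) {
+        //     if (!(mine && theirs)) return false;   // set on one side only
+        //     if (!x.equals(that.x)) return false;   // both set: compare values
+        //   }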
+ if (!this.o3.equals(that.o3)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(alter_resource_plan_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("alter_resource_plan_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new 
org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class alter_resource_plan_resultStandardSchemeFactory implements SchemeFactory { + public alter_resource_plan_resultStandardScheme getScheme() { + return new alter_resource_plan_resultStandardScheme(); + } + } + + private static class alter_resource_plan_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_resource_plan_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new InvalidOperationException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_resource_plan_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class alter_resource_plan_resultTupleSchemeFactory implements SchemeFactory { + public alter_resource_plan_resultTupleScheme getScheme() { + return new alter_resource_plan_resultTupleScheme(); + } + } + + private static class alter_resource_plan_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + if (struct.isSetO3()) { + optionals.set(1); + } + if (struct.isSetO2()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, alter_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = 
(TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o3 = new InvalidOperationException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + public static class validate_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("validate_resource_plan_args"); + + private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new validate_resource_plan_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new validate_resource_plan_argsTupleSchemeFactory()); + } + + private String name; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + NAME((short)1, "name"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // NAME + return NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(validate_resource_plan_args.class, metaDataMap); + } + + public validate_resource_plan_args() { + } + + public validate_resource_plan_args( + String name) + { + this(); + this.name = name; + } + + /** + * Performs a deep copy on other. 
+     */
+    public validate_resource_plan_args(validate_resource_plan_args other) {
+      if (other.isSetName()) {
+        this.name = other.name;
+      }
+    }
+
+    public validate_resource_plan_args deepCopy() {
+      return new validate_resource_plan_args(this);
+    }
+
+    @Override
+    public void clear() {
+      this.name = null;
+    }
+
+    public String getName() {
+      return this.name;
+    }
+
+    public void setName(String name) {
+      this.name = name;
+    }
+
+    public void unsetName() {
+      this.name = null;
+    }
+
+    /** Returns true if field name is set (has been assigned a value) and false otherwise */
+    public boolean isSetName() {
+      return this.name != null;
+    }
+
+    public void setNameIsSet(boolean value) {
+      if (!value) {
+        this.name = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case NAME:
+        if (value == null) {
+          unsetName();
+        } else {
+          setName((String)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case NAME:
+        return getName();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case NAME:
+        return isSetName();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof validate_resource_plan_args)
+        return this.equals((validate_resource_plan_args)that);
+      return false;
+    }
+
+    public boolean equals(validate_resource_plan_args that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_name = true && this.isSetName();
+      boolean that_present_name = true && that.isSetName();
+      if (this_present_name || that_present_name) {
+        if (!(this_present_name && that_present_name))
+          return false;
+        if (!this.name.equals(that.name))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      List<Object> list = new ArrayList<Object>();
+
+      boolean present_name = true && (isSetName());
+      list.add(present_name);
+      if (present_name)
+        list.add(name);
+
+      return list.hashCode();
+    }
+
+    @Override
+    public int compareTo(validate_resource_plan_args other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+
+      lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetName()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("validate_resource_plan_args(");
+      boolean first = true;
+
+      sb.append("name:");
+      if (this.name == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.name);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class validate_resource_plan_argsStandardSchemeFactory implements SchemeFactory {
+      public validate_resource_plan_argsStandardScheme getScheme() {
+        return new validate_resource_plan_argsStandardScheme();
+      }
+    }
+
+    private static class validate_resource_plan_argsStandardScheme extends StandardScheme<validate_resource_plan_args> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, validate_resource_plan_args struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+            break;
+          }
+          switch (schemeField.id) {
+            case 1: // NAME
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+                struct.name = iprot.readString();
+                struct.setNameIsSet(true);
+              } else {
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, validate_resource_plan_args struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.name != null) {
+          oprot.writeFieldBegin(NAME_FIELD_DESC);
+          oprot.writeString(struct.name);
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class validate_resource_plan_argsTupleSchemeFactory implements SchemeFactory {
+      public validate_resource_plan_argsTupleScheme getScheme() {
+        return new validate_resource_plan_argsTupleScheme();
+      }
+    }
+
+    private static class validate_resource_plan_argsTupleScheme extends TupleScheme<validate_resource_plan_args> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, validate_resource_plan_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetName()) {
+          optionals.set(0);
+        }
+        oprot.writeBitSet(optionals, 1);
+        if (struct.isSetName()) {
+          oprot.writeString(struct.name);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, validate_resource_plan_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(1);
+        if (incoming.get(0)) {
+          struct.name = iprot.readString();
+          struct.setNameIsSet(true);
+        }
+      }
+    }
+
+  }
+
+  public static class validate_resource_plan_result implements org.apache.thrift.TBase<validate_resource_plan_result, validate_resource_plan_result._Fields>, java.io.Serializable, Cloneable, Comparable<validate_resource_plan_result> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new
org.apache.thrift.protocol.TStruct("validate_resource_plan_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new validate_resource_plan_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new validate_resource_plan_resultTupleSchemeFactory()); + } + + private NoSuchObjectException o1; // required + private MetaException o3; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"), + O3((short)2, "o3"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + case 2: // O3 + return O3; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(validate_resource_plan_result.class, metaDataMap); + } + + public validate_resource_plan_result() { + } + + public validate_resource_plan_result( + NoSuchObjectException o1, + MetaException o3) + { + this(); + this.o1 = o1; + this.o3 = o3; + } + + /** + * Performs a deep copy on other. 
+ */ + public validate_resource_plan_result(validate_resource_plan_result other) { + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); + } + } + + public validate_resource_plan_result deepCopy() { + return new validate_resource_plan_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o3 = null; + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O3: + return getO3(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + case O3: + return isSetO3(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof validate_resource_plan_result) + return this.equals((validate_resource_plan_result)that); + return false; + } + + public boolean equals(validate_resource_plan_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + + return list.hashCode(); + } + + @Override + public int compareTo(validate_resource_plan_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + 
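+      // Hedged note: generated compareTo() walks fields in declaration order,
+      // comparing the isSet flags first (Boolean.FALSE sorts before TRUE, so
+      // unset < set) and only then the values via TBaseHelper.compareTo,
+      // returning at the first nonzero result.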
if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("validate_resource_plan_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class validate_resource_plan_resultStandardSchemeFactory implements SchemeFactory { + public validate_resource_plan_resultStandardScheme getScheme() { + return new validate_resource_plan_resultStandardScheme(); + } + } + + private static class validate_resource_plan_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, validate_resource_plan_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + 
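+          // Hedged note: every generated struct registers two serializers. This
+          // StandardScheme handles field-tagged protocols (TBinaryProtocol,
+          // TCompactProtocol); the TupleScheme further down is the denser variant
+          // for TTupleProtocol, which drops per-field headers and instead leads
+          // with a BitSet naming the optional fields present:
+          //
+          //   BitSet optionals = new BitSet();
+          //   if (struct.isSetO1()) optionals.set(0);
+          //   if (struct.isSetO3()) optionals.set(1);
+          //   oprot.writeBitSet(optionals, 2);  // 2 = tracked field count
+          //   if (struct.isSetO1()) struct.o1.write(oprot);
+          //   if (struct.isSetO3()) struct.o3.write(oprot);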
iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, validate_resource_plan_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class validate_resource_plan_resultTupleSchemeFactory implements SchemeFactory { + public validate_resource_plan_resultTupleScheme getScheme() { + return new validate_resource_plan_resultTupleScheme(); + } + } + + private static class validate_resource_plan_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, validate_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + if (struct.isSetO3()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, validate_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } + } + } + + } + + public static class drop_resource_plan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_resource_plan_args"); + + private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new drop_resource_plan_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_resource_plan_argsTupleSchemeFactory()); + } + + private String name; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + NAME((short)1, "name"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // NAME + return NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_resource_plan_args.class, metaDataMap); + } + + public drop_resource_plan_args() { + } + + public drop_resource_plan_args( + String name) + { + this(); + this.name = name; + } + + /** + * Performs a deep copy on other. + */ + public drop_resource_plan_args(drop_resource_plan_args other) { + if (other.isSetName()) { + this.name = other.name; + } + } + + public drop_resource_plan_args deepCopy() { + return new drop_resource_plan_args(this); + } + + @Override + public void clear() { + this.name = null; + } + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + + public void unsetName() { + this.name = null; + } + + /** Returns true if field name is set (has been assigned a value) and false otherwise */ + public boolean isSetName() { + return this.name != null; + } + + public void setNameIsSet(boolean value) { + if (!value) { + this.name = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case NAME: + if (value == null) { + unsetName(); + } else { + setName((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case NAME: + return getName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case NAME: + return isSetName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof drop_resource_plan_args) + return this.equals((drop_resource_plan_args)that); + return false; + } + + public boolean equals(drop_resource_plan_args that) { + if (that == null) + return false; + + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) + return false; + if (!this.name.equals(that.name)) + return false; + } + + return true; + } + + @Override + public int hashCode() 
{ + List list = new ArrayList(); + + boolean present_name = true && (isSetName()); + list.add(present_name); + if (present_name) + list.add(name); + + return list.hashCode(); + } + + @Override + public int compareTo(drop_resource_plan_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("drop_resource_plan_args("); + boolean first = true; + + sb.append("name:"); + if (this.name == null) { + sb.append("null"); + } else { + sb.append(this.name); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class drop_resource_plan_argsStandardSchemeFactory implements SchemeFactory { + public drop_resource_plan_argsStandardScheme getScheme() { + return new drop_resource_plan_argsStandardScheme(); + } + } + + private static class drop_resource_plan_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_resource_plan_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_resource_plan_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.name != null) { + 
oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(struct.name); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class drop_resource_plan_argsTupleSchemeFactory implements SchemeFactory { + public drop_resource_plan_argsTupleScheme getScheme() { + return new drop_resource_plan_argsTupleScheme(); + } + } + + private static class drop_resource_plan_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetName()) { + oprot.writeString(struct.name); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } + } + } + + } + + public static class drop_resource_plan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_resource_plan_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new drop_resource_plan_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_resource_plan_resultTupleSchemeFactory()); + } + + private NoSuchObjectException o1; // required + private MetaException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_resource_plan_result.class, metaDataMap); + } + + public drop_resource_plan_result() { + } + + public drop_resource_plan_result( + NoSuchObjectException o1, + MetaException o2) + { + this(); + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. + */ + public drop_resource_plan_result(drop_resource_plan_result other) { + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + } + + public drop_resource_plan_result deepCopy() { + return new drop_resource_plan_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o2 = null; + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); 
+ } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof drop_resource_plan_result) + return this.equals((drop_resource_plan_result)that); + return false; + } + + public boolean equals(drop_resource_plan_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(drop_resource_plan_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("drop_resource_plan_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); 
+ } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class drop_resource_plan_resultStandardSchemeFactory implements SchemeFactory { + public drop_resource_plan_resultStandardScheme getScheme() { + return new drop_resource_plan_resultStandardScheme(); + } + } + + private static class drop_resource_plan_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_resource_plan_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_resource_plan_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class drop_resource_plan_resultTupleSchemeFactory implements SchemeFactory { + public drop_resource_plan_resultTupleScheme getScheme() { + return new drop_resource_plan_resultTupleScheme(); + } + } + + private static class drop_resource_plan_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + if (struct.isSetO2()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, drop_resource_plan_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + } diff --git standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index 358b8520f4..8d126b9a8b 100644 --- 
standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -1247,6 +1247,44 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @throws \metastore\MetaException */ public function get_metastore_db_uuid(); + /** + * @param \metastore\WMResourcePlan $resourcePlan + * @throws \metastore\InvalidObjectException + * @throws \metastore\MetaException + */ + public function create_resource_plan(\metastore\WMResourcePlan $resourcePlan); + /** + * @param string $name + * @return \metastore\WMResourcePlan + * @throws \metastore\NoSuchObjectException + * @throws \metastore\MetaException + */ + public function get_resource_plan($name); + /** + * @return \metastore\WMResourcePlan[] + * @throws \metastore\MetaException + */ + public function get_all_resource_plans(); + /** + * @param string $name + * @param \metastore\WMResourcePlan $resourcePlan + * @throws \metastore\NoSuchObjectException + * @throws \metastore\InvalidOperationException + * @throws \metastore\MetaException + */ + public function alter_resource_plan($name, \metastore\WMResourcePlan $resourcePlan); + /** + * @param string $name + * @throws \metastore\NoSuchObjectException + * @throws \metastore\MetaException + */ + public function validate_resource_plan($name); + /** + * @param string $name + * @throws \metastore\NoSuchObjectException + * @throws \metastore\MetaException + */ + public function drop_resource_plan($name); } class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf { @@ -10435,196 +10473,347 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_metastore_db_uuid failed: unknown result"); } -} + public function create_resource_plan(\metastore\WMResourcePlan $resourcePlan) + { + $this->send_create_resource_plan($resourcePlan); + $this->recv_create_resource_plan(); + } -// HELPER FUNCTIONS AND STRUCTURES + public function send_create_resource_plan(\metastore\WMResourcePlan $resourcePlan) + { + $args = new \metastore\ThriftHiveMetastore_create_resource_plan_args(); + $args->resourcePlan = $resourcePlan; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'create_resource_plan', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('create_resource_plan', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } -class ThriftHiveMetastore_getMetaConf_args { - static $_TSPEC; + public function recv_create_resource_plan() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_create_resource_plan_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; - /** - * @var string - */ - public $key = null; + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_create_resource_plan_result(); + 
$result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + return; + } - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 1 => array( - 'var' => 'key', - 'type' => TType::STRING, - ), - ); + public function get_resource_plan($name) + { + $this->send_get_resource_plan($name); + return $this->recv_get_resource_plan(); + } + + public function send_get_resource_plan($name) + { + $args = new \metastore\ThriftHiveMetastore_get_resource_plan_args(); + $args->name = $name; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_resource_plan', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); } - if (is_array($vals)) { - if (isset($vals['key'])) { - $this->key = $vals['key']; + else + { + $this->output_->writeMessageBegin('get_resource_plan', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_resource_plan() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_resource_plan_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; } + $result = new \metastore\ThriftHiveMetastore_get_resource_plan_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + throw new \Exception("get_resource_plan failed: unknown result"); } - public function getName() { - return 'ThriftHiveMetastore_getMetaConf_args'; + public function get_all_resource_plans() + { + $this->send_get_all_resource_plans(); + return $this->recv_get_all_resource_plans(); } - public function read($input) + public function send_get_all_resource_plans() { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) + $args = new \metastore\ThriftHiveMetastore_get_all_resource_plans_args(); + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->key); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; + thrift_protocol_write_binary($this->output_, 'get_all_resource_plans', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_all_resource_plans', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); 
+ } + } + + public function recv_get_all_resource_plans() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_all_resource_plans_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; } - $xfer += $input->readFieldEnd(); + $result = new \metastore\ThriftHiveMetastore_get_all_resource_plans_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); } - $xfer += $input->readStructEnd(); - return $xfer; + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + throw new \Exception("get_all_resource_plans failed: unknown result"); } - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_args'); - if ($this->key !== null) { - $xfer += $output->writeFieldBegin('key', TType::STRING, 1); - $xfer += $output->writeString($this->key); - $xfer += $output->writeFieldEnd(); + public function alter_resource_plan($name, \metastore\WMResourcePlan $resourcePlan) + { + $this->send_alter_resource_plan($name, $resourcePlan); + $this->recv_alter_resource_plan(); + } + + public function send_alter_resource_plan($name, \metastore\WMResourcePlan $resourcePlan) + { + $args = new \metastore\ThriftHiveMetastore_alter_resource_plan_args(); + $args->name = $name; + $args->resourcePlan = $resourcePlan; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'alter_resource_plan', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('alter_resource_plan', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; } -} + public function recv_alter_resource_plan() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_alter_resource_plan_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; -class ThriftHiveMetastore_getMetaConf_result { - static $_TSPEC; + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_alter_resource_plan_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o3 !== null) { + throw $result->o3; + } + if ($result->o2 !== null) { + throw $result->o2; + } + return; + } - /** - * @var string - */ - public $success = null; - /** - * @var \metastore\MetaException - */ - public $o1 = null; + public function validate_resource_plan($name) + 
{ + $this->send_validate_resource_plan($name); + $this->recv_validate_resource_plan(); + } - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 0 => array( - 'var' => 'success', - 'type' => TType::STRING, - ), - 1 => array( - 'var' => 'o1', - 'type' => TType::STRUCT, - 'class' => '\metastore\MetaException', - ), - ); + public function send_validate_resource_plan($name) + { + $args = new \metastore\ThriftHiveMetastore_validate_resource_plan_args(); + $args->name = $name; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'validate_resource_plan', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); } - if (is_array($vals)) { - if (isset($vals['success'])) { - $this->success = $vals['success']; - } - if (isset($vals['o1'])) { - $this->o1 = $vals['o1']; + else + { + $this->output_->writeMessageBegin('validate_resource_plan', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_validate_resource_plan() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_validate_resource_plan_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; } + $result = new \metastore\ThriftHiveMetastore_validate_resource_plan_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o3 !== null) { + throw $result->o3; } + return; } - public function getName() { - return 'ThriftHiveMetastore_getMetaConf_result'; + public function drop_resource_plan($name) + { + $this->send_drop_resource_plan($name); + $this->recv_drop_resource_plan(); } - public function read($input) + public function send_drop_resource_plan($name) { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) + $args = new \metastore\ThriftHiveMetastore_drop_resource_plan_args(); + $args->name = $name; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 0: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->success); - } else { - $xfer += $input->skip($ftype); - } - break; - case 1: - if ($ftype == TType::STRUCT) { - $this->o1 = new \metastore\MetaException(); - $xfer += $this->o1->read($input); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); + thrift_protocol_write_binary($this->output_, 'drop_resource_plan', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('drop_resource_plan', TMessageType::CALL, $this->seqid_); + 
$args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); } - $xfer += $input->readStructEnd(); - return $xfer; } - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_result'); - if ($this->success !== null) { - $xfer += $output->writeFieldBegin('success', TType::STRING, 0); - $xfer += $output->writeString($this->success); - $xfer += $output->writeFieldEnd(); + public function recv_drop_resource_plan() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_drop_resource_plan_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_drop_resource_plan_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); } - if ($this->o1 !== null) { - $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); - $xfer += $this->o1->write($output); - $xfer += $output->writeFieldEnd(); + if ($result->o1 !== null) { + throw $result->o1; } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; + if ($result->o2 !== null) { + throw $result->o2; + } + return; } } -class ThriftHiveMetastore_setMetaConf_args { +// HELPER FUNCTIONS AND STRUCTURES + +class ThriftHiveMetastore_getMetaConf_args { static $_TSPEC; /** * @var string */ public $key = null; - /** - * @var string - */ - public $value = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -10633,24 +10822,203 @@ class ThriftHiveMetastore_setMetaConf_args { 'var' => 'key', 'type' => TType::STRING, ), - 2 => array( - 'var' => 'value', - 'type' => TType::STRING, - ), ); } if (is_array($vals)) { if (isset($vals['key'])) { $this->key = $vals['key']; } - if (isset($vals['value'])) { - $this->value = $vals['value']; - } } } public function getName() { - return 'ThriftHiveMetastore_setMetaConf_args'; + return 'ThriftHiveMetastore_getMetaConf_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->key); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_args'); + if ($this->key !== null) { + $xfer += $output->writeFieldBegin('key', TType::STRING, 1); + $xfer += $output->writeString($this->key); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_getMetaConf_result { + static $_TSPEC; + + /** + * @var string + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o1 = null; + 
+ public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRING, + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_getMetaConf_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->success); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_result'); + if ($this->success !== null) { + $xfer += $output->writeFieldBegin('success', TType::STRING, 0); + $xfer += $output->writeString($this->success); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_setMetaConf_args { + static $_TSPEC; + + /** + * @var string + */ + public $key = null; + /** + * @var string + */ + public $value = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'key', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'value', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['key'])) { + $this->key = $vals['key']; + } + if (isset($vals['value'])) { + $this->value = $vals['value']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_setMetaConf_args'; } public function read($input) @@ -47428,4 +47796,1153 @@ class ThriftHiveMetastore_get_metastore_db_uuid_result { } +class ThriftHiveMetastore_create_resource_plan_args { + static $_TSPEC; + + /** + * @var \metastore\WMResourcePlan + */ + public $resourcePlan = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'resourcePlan', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMResourcePlan', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['resourcePlan'])) { + $this->resourcePlan = $vals['resourcePlan']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_create_resource_plan_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == 
TType::STRUCT) { + $this->resourcePlan = new \metastore\WMResourcePlan(); + $xfer += $this->resourcePlan->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_resource_plan_args'); + if ($this->resourcePlan !== null) { + if (!is_object($this->resourcePlan)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('resourcePlan', TType::STRUCT, 1); + $xfer += $this->resourcePlan->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_create_resource_plan_result { + static $_TSPEC; + + /** + * @var \metastore\InvalidObjectException + */ + public $o1 = null; + /** + * @var \metastore\MetaException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_create_resource_plan_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\InvalidObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_resource_plan_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_resource_plan_args { + static $_TSPEC; + + /** + * @var string + */ + public $name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + } + } + + public function getName() { + return 
'ThriftHiveMetastore_get_resource_plan_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_resource_plan_args'); + if ($this->name !== null) { + $xfer += $output->writeFieldBegin('name', TType::STRING, 1); + $xfer += $output->writeString($this->name); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_resource_plan_result { + static $_TSPEC; + + /** + * @var \metastore\WMResourcePlan + */ + public $success = null; + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\MetaException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMResourcePlan', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_resource_plan_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\WMResourcePlan(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_resource_plan_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { 
+ $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_all_resource_plans_args { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_all_resource_plans_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_resource_plans_args'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_all_resource_plans_result { + static $_TSPEC; + + /** + * @var \metastore\WMResourcePlan[] + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\WMResourcePlan', + ), + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_all_resource_plans_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::LST) { + $this->success = array(); + $_size1133 = 0; + $_etype1136 = 0; + $xfer += $input->readListBegin($_etype1136, $_size1133); + for ($_i1137 = 0; $_i1137 < $_size1133; ++$_i1137) + { + $elem1138 = null; + $elem1138 = new \metastore\WMResourcePlan(); + $xfer += $elem1138->read($input); + $this->success []= $elem1138; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_resource_plans_result'); + if ($this->success !== null) { + if (!is_array($this->success)) { + throw new TProtocolException('Bad 
type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::LST, 0); + { + $output->writeListBegin(TType::STRUCT, count($this->success)); + { + foreach ($this->success as $iter1139) + { + $xfer += $iter1139->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_alter_resource_plan_args { + static $_TSPEC; + + /** + * @var string + */ + public $name = null; + /** + * @var \metastore\WMResourcePlan + */ + public $resourcePlan = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'resourcePlan', + 'type' => TType::STRUCT, + 'class' => '\metastore\WMResourcePlan', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + if (isset($vals['resourcePlan'])) { + $this->resourcePlan = $vals['resourcePlan']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_alter_resource_plan_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->resourcePlan = new \metastore\WMResourcePlan(); + $xfer += $this->resourcePlan->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_resource_plan_args'); + if ($this->name !== null) { + $xfer += $output->writeFieldBegin('name', TType::STRING, 1); + $xfer += $output->writeString($this->name); + $xfer += $output->writeFieldEnd(); + } + if ($this->resourcePlan !== null) { + if (!is_object($this->resourcePlan)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('resourcePlan', TType::STRUCT, 2); + $xfer += $this->resourcePlan->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_alter_resource_plan_result { + static $_TSPEC; + + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\InvalidOperationException + */ + public $o3 = null; + /** + * @var \metastore\MetaException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidOperationException', + ), 
+ 3 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_alter_resource_plan_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\InvalidOperationException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_resource_plan_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 2); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 3); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_validate_resource_plan_args { + static $_TSPEC; + + /** + * @var string + */ + public $name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_validate_resource_plan_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_validate_resource_plan_args'); + if ($this->name !== null) { + $xfer += $output->writeFieldBegin('name', TType::STRING, 1); + $xfer += $output->writeString($this->name); + $xfer += $output->writeFieldEnd(); + } + $xfer 
+= $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_validate_resource_plan_result { + static $_TSPEC; + + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\MetaException + */ + public $o3 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_validate_resource_plan_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_validate_resource_plan_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 2); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_resource_plan_args { + static $_TSPEC; + + /** + * @var string + */ + public $name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_resource_plan_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_resource_plan_args'); + if ($this->name !== null) { + $xfer += $output->writeFieldBegin('name', 
TType::STRING, 1); + $xfer += $output->writeString($this->name); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_resource_plan_result { + static $_TSPEC; + + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\MetaException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_resource_plan_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_resource_plan_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-py/__init__.py standalone-metastore/src/gen/thrift/gen-py/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index de9ad1b3f1..7f792b073b 100755 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -186,6 +186,12 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' ClearFileMetadataResult clear_file_metadata(ClearFileMetadataRequest req)') print(' CacheFileMetadataResult cache_file_metadata(CacheFileMetadataRequest req)') print(' string get_metastore_db_uuid()') + print(' void create_resource_plan(WMResourcePlan resourcePlan)') + print(' WMResourcePlan get_resource_plan(string name)') + print(' get_all_resource_plans()') + print(' void alter_resource_plan(string name, WMResourcePlan resourcePlan)') + print(' void validate_resource_plan(string name)') + print(' void drop_resource_plan(string 
name)')
   print('  string getName()')
   print('  string getVersion()')
   print('  fb_status getStatus()')
@@ -1227,6 +1233,42 @@ elif cmd == 'get_metastore_db_uuid':
     sys.exit(1)
   pp.pprint(client.get_metastore_db_uuid())
 
+elif cmd == 'create_resource_plan':
+  if len(args) != 1:
+    print('create_resource_plan requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.create_resource_plan(eval(args[0]),))
+
+elif cmd == 'get_resource_plan':
+  if len(args) != 1:
+    print('get_resource_plan requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.get_resource_plan(args[0],))
+
+elif cmd == 'get_all_resource_plans':
+  if len(args) != 0:
+    print('get_all_resource_plans requires 0 args')
+    sys.exit(1)
+  pp.pprint(client.get_all_resource_plans())
+
+elif cmd == 'alter_resource_plan':
+  if len(args) != 2:
+    print('alter_resource_plan requires 2 args')
+    sys.exit(1)
+  pp.pprint(client.alter_resource_plan(args[0],eval(args[1]),))
+
+elif cmd == 'validate_resource_plan':
+  if len(args) != 1:
+    print('validate_resource_plan requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.validate_resource_plan(args[0],))
+
+elif cmd == 'drop_resource_plan':
+  if len(args) != 1:
+    print('drop_resource_plan requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.drop_resource_plan(args[0],))
+
 elif cmd == 'getName':
   if len(args) != 0:
     print('getName requires 0 args')
diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 345790122b..89ea07e275 100644
--- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -1293,6 +1293,45 @@ def cache_file_metadata(self, req):
   def get_metastore_db_uuid(self):
     pass
 
+  def create_resource_plan(self, resourcePlan):
+    """
+    Parameters:
+     - resourcePlan
+    """
+    pass
+
+  def get_resource_plan(self, name):
+    """
+    Parameters:
+     - name
+    """
+    pass
+
+  def get_all_resource_plans(self):
+    pass
+
+  def alter_resource_plan(self, name, resourcePlan):
+    """
+    Parameters:
+     - name
+     - resourcePlan
+    """
+    pass
+
+  def validate_resource_plan(self, name):
+    """
+    Parameters:
+     - name
+    """
+    pass
+
+  def drop_resource_plan(self, name):
+    """
+    Parameters:
+     - name
+    """
+    pass
+
 class Client(fb303.FacebookService.Client, Iface):
   """
@@ -7127,6 +7166,205 @@ def recv_get_metastore_db_uuid(self):
       raise result.o1
     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_metastore_db_uuid failed: unknown result")
 
+  def create_resource_plan(self, resourcePlan):
+    """
+    Parameters:
+     - resourcePlan
+    """
+    self.send_create_resource_plan(resourcePlan)
+    self.recv_create_resource_plan()
+
+  def send_create_resource_plan(self, resourcePlan):
+    self._oprot.writeMessageBegin('create_resource_plan', TMessageType.CALL, self._seqid)
+    args = create_resource_plan_args()
+    args.resourcePlan = resourcePlan
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_create_resource_plan(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = create_resource_plan_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    return
+
+  def get_resource_plan(self, name):
+    """
+    Parameters:
+     - name
+    """
+    self.send_get_resource_plan(name)
+    return self.recv_get_resource_plan()
+
+  def send_get_resource_plan(self, name):
+    self._oprot.writeMessageBegin('get_resource_plan', TMessageType.CALL, self._seqid)
+    args = get_resource_plan_args()
+    args.name = name
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_resource_plan(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = get_resource_plan_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_resource_plan failed: unknown result")
+
+  def get_all_resource_plans(self):
+    self.send_get_all_resource_plans()
+    return self.recv_get_all_resource_plans()
+
+  def send_get_all_resource_plans(self):
+    self._oprot.writeMessageBegin('get_all_resource_plans', TMessageType.CALL, self._seqid)
+    args = get_all_resource_plans_args()
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_all_resource_plans(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = get_all_resource_plans_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.o1 is not None:
+      raise result.o1
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_resource_plans failed: unknown result")
+
+  def alter_resource_plan(self, name, resourcePlan):
+    """
+    Parameters:
+     - name
+     - resourcePlan
+    """
+    self.send_alter_resource_plan(name, resourcePlan)
+    self.recv_alter_resource_plan()
+
+  def send_alter_resource_plan(self, name, resourcePlan):
+    self._oprot.writeMessageBegin('alter_resource_plan', TMessageType.CALL, self._seqid)
+    args = alter_resource_plan_args()
+    args.name = name
+    args.resourcePlan = resourcePlan
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_alter_resource_plan(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = alter_resource_plan_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.o1 is not None:
+      raise result.o1
+    if result.o3 is not None:
+      raise result.o3
+    if result.o2 is not None:
+      raise result.o2
+    return
+
+  def validate_resource_plan(self, name):
+    """
+    Parameters:
+     - name
+    """
+    self.send_validate_resource_plan(name)
+    self.recv_validate_resource_plan()
+
+  def send_validate_resource_plan(self, name):
+    self._oprot.writeMessageBegin('validate_resource_plan', TMessageType.CALL, self._seqid)
+    args = validate_resource_plan_args()
+    args.name = name
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_validate_resource_plan(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = validate_resource_plan_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.o1 is not None:
+      raise result.o1
+    if result.o3 is not None:
+      raise result.o3
+    return
+
+  def drop_resource_plan(self, name):
+    """
+    Parameters:
+     - name
+    """
+    self.send_drop_resource_plan(name)
+    self.recv_drop_resource_plan()
+
+  def send_drop_resource_plan(self, name):
+    self._oprot.writeMessageBegin('drop_resource_plan', TMessageType.CALL, self._seqid)
+    args = drop_resource_plan_args()
+    args.name = name
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_drop_resource_plan(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = drop_resource_plan_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    return
+
 class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
   def __init__(self, handler):
@@ -7293,6 +7531,12 @@ def __init__(self, handler):
     self._processMap["clear_file_metadata"] = Processor.process_clear_file_metadata
     self._processMap["cache_file_metadata"] = Processor.process_cache_file_metadata
     self._processMap["get_metastore_db_uuid"] = Processor.process_get_metastore_db_uuid
+    self._processMap["create_resource_plan"] = Processor.process_create_resource_plan
+    self._processMap["get_resource_plan"] = Processor.process_get_resource_plan
+    self._processMap["get_all_resource_plans"] = Processor.process_get_all_resource_plans
+    self._processMap["alter_resource_plan"] = Processor.process_alter_resource_plan
+    self._processMap["validate_resource_plan"] = Processor.process_validate_resource_plan
+    self._processMap["drop_resource_plan"] = Processor.process_drop_resource_plan
 
   def process(self, iprot, oprot):
     (name, type, seqid) = iprot.readMessageBegin()
@@ -11254,6 +11498,156 @@ def process_get_metastore_db_uuid(self, seqid, iprot, oprot):
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
+  def process_create_resource_plan(self, seqid, iprot, oprot):
+    args = create_resource_plan_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = create_resource_plan_result()
+    try:
+      self._handler.create_resource_plan(args.resourcePlan)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except InvalidObjectException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except MetaException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("create_resource_plan", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_get_resource_plan(self, seqid, iprot, oprot):
+    args = get_resource_plan_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_resource_plan_result()
+    try:
+      result.success = self._handler.get_resource_plan(args.name)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchObjectException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except MetaException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("get_resource_plan", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_get_all_resource_plans(self, seqid, iprot, oprot):
+    args = get_all_resource_plans_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_all_resource_plans_result()
+    try:
+      result.success = self._handler.get_all_resource_plans()
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except MetaException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("get_all_resource_plans", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_alter_resource_plan(self, seqid, iprot, oprot):
+    args = alter_resource_plan_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = alter_resource_plan_result()
+    try:
+      self._handler.alter_resource_plan(args.name, args.resourcePlan)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchObjectException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except InvalidOperationException as o3:
+      msg_type = TMessageType.REPLY
+      result.o3 = o3
+    except MetaException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("alter_resource_plan", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_validate_resource_plan(self, seqid, iprot, oprot):
+    args = validate_resource_plan_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = validate_resource_plan_result()
+    try:
+      self._handler.validate_resource_plan(args.name)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchObjectException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except MetaException as o3:
+      msg_type = TMessageType.REPLY
+      result.o3 = o3
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("validate_resource_plan", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_drop_resource_plan(self, seqid, iprot, oprot):
+    args = drop_resource_plan_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = drop_resource_plan_result()
+    try:
+      self._handler.drop_resource_plan(args.name)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchObjectException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except MetaException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("drop_resource_plan", msg_type, seqid)
+    result.write(oprot)
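Read together with the generated client methods above, these six process_* handlers complete the wire contract for the new resource-plan calls: exceptions declared in the IDL (NoSuchObjectException, InvalidObjectException, InvalidOperationException, MetaException) travel back as REPLY messages in the result struct's o1/o2/o3 fields, which the client's recv_* methods re-raise, while anything undeclared is logged and downgraded to a TApplicationException(INTERNAL_ERROR). A minimal sketch of driving the new API through this generated Python client follows; the host/port, the plan name, and having the generated hive_metastore package on PYTHONPATH are assumptions for illustration, not part of the patch.

# Sketch only: assumes a metastore Thrift endpoint on localhost:9083 and the
# generated hive_metastore package importable from PYTHONPATH.
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_metastore import ThriftHiveMetastore
from hive_metastore.ttypes import WMResourcePlan, NoSuchObjectException

transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
transport.open()
try:
    client.create_resource_plan(WMResourcePlan(name='plan_a'))  # void call
    plan = client.get_resource_plan('plan_a')  # recv_* returns result.success
    client.validate_resource_plan('plan_a')
    client.drop_resource_plan('plan_a')
except NoSuchObjectException as e:
    # Declared exceptions come back as set o1/o2/o3 result fields and are
    # re-raised by the recv_* methods shown above.
    print('no such resource plan: %s' % e.message)
finally:
    transport.close()

Note that only get_resource_plan and get_all_resource_plans carry a return value; the other four methods are void on the wire and signal failure solely through the declared exception fields.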
oprot.writeMessageEnd() + oprot.trans.flush() + # HELPER FUNCTIONS AND STRUCTURES @@ -31132,19 +31526,1126 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class drop_role_args: +class drop_role_args: + """ + Attributes: + - role_name + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'role_name', None, None, ), # 1 + ) + + def __init__(self, role_name=None,): + self.role_name = role_name + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.role_name = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('drop_role_args') + if self.role_name is not None: + oprot.writeFieldBegin('role_name', TType.STRING, 1) + oprot.writeString(self.role_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.role_name) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class drop_role_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.BOOL, 'success', None, None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('drop_role_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + 
self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_role_names_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_role_names_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_role_names_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1078, _size1075) = iprot.readListBegin() + for _i1079 in xrange(_size1075): + _elem1080 = iprot.readString() + self.success.append(_elem1080) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_role_names_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRING, 
len(self.success)) + for iter1081 in self.success: + oprot.writeString(iter1081) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class grant_role_args: + """ + Attributes: + - role_name + - principal_name + - principal_type + - grantor + - grantorType + - grant_option + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'role_name', None, None, ), # 1 + (2, TType.STRING, 'principal_name', None, None, ), # 2 + (3, TType.I32, 'principal_type', None, None, ), # 3 + (4, TType.STRING, 'grantor', None, None, ), # 4 + (5, TType.I32, 'grantorType', None, None, ), # 5 + (6, TType.BOOL, 'grant_option', None, None, ), # 6 + ) + + def __init__(self, role_name=None, principal_name=None, principal_type=None, grantor=None, grantorType=None, grant_option=None,): + self.role_name = role_name + self.principal_name = principal_name + self.principal_type = principal_type + self.grantor = grantor + self.grantorType = grantorType + self.grant_option = grant_option + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.role_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.principal_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.principal_type = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.grantor = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.grantorType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.BOOL: + self.grant_option = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('grant_role_args') + if self.role_name is not None: + oprot.writeFieldBegin('role_name', TType.STRING, 1) + oprot.writeString(self.role_name) + oprot.writeFieldEnd() + if self.principal_name is not None: + oprot.writeFieldBegin('principal_name', TType.STRING, 2) + oprot.writeString(self.principal_name) + oprot.writeFieldEnd() + if self.principal_type is not None: + oprot.writeFieldBegin('principal_type', TType.I32, 3) + oprot.writeI32(self.principal_type) + oprot.writeFieldEnd() 
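Two fixed conventions recur through the value-object boilerplate in the rest of this hunk: each thrift_spec entry is a positional tuple of (field id, wire type, field name, nested type spec, default), stored at the slot matching its field id (hence the None placeholder at slot 0 for argument structs), and every struct's __hash__ folds its fields as value = (value * 31) ^ hash(field), starting from 17. A standalone illustration of that folding, with made-up field values:

# Illustration of the generated __hash__ convention: seed 17, then fold each
# field in declaration order with multiply-by-31 XOR hash(field).
def fold_hash(*fields):
    value = 17
    for field in fields:
        value = (value * 31) ^ hash(field)
    return value

# Structs with equal field values hash alike, consistent with the generated
# __eq__, which compares instance __dict__s.
assert fold_hash('role_a', 'hive', 1) == fold_hash('role_a', 'hive', 1)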
+ if self.grantor is not None: + oprot.writeFieldBegin('grantor', TType.STRING, 4) + oprot.writeString(self.grantor) + oprot.writeFieldEnd() + if self.grantorType is not None: + oprot.writeFieldBegin('grantorType', TType.I32, 5) + oprot.writeI32(self.grantorType) + oprot.writeFieldEnd() + if self.grant_option is not None: + oprot.writeFieldBegin('grant_option', TType.BOOL, 6) + oprot.writeBool(self.grant_option) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.role_name) + value = (value * 31) ^ hash(self.principal_name) + value = (value * 31) ^ hash(self.principal_type) + value = (value * 31) ^ hash(self.grantor) + value = (value * 31) ^ hash(self.grantorType) + value = (value * 31) ^ hash(self.grant_option) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class grant_role_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.BOOL, 'success', None, None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('grant_role_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class revoke_role_args: + """ + Attributes: + - role_name + - principal_name + - principal_type + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'role_name', None, None, ), # 1 + (2, TType.STRING, 
'principal_name', None, None, ), # 2 + (3, TType.I32, 'principal_type', None, None, ), # 3 + ) + + def __init__(self, role_name=None, principal_name=None, principal_type=None,): + self.role_name = role_name + self.principal_name = principal_name + self.principal_type = principal_type + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.role_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.principal_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.principal_type = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('revoke_role_args') + if self.role_name is not None: + oprot.writeFieldBegin('role_name', TType.STRING, 1) + oprot.writeString(self.role_name) + oprot.writeFieldEnd() + if self.principal_name is not None: + oprot.writeFieldBegin('principal_name', TType.STRING, 2) + oprot.writeString(self.principal_name) + oprot.writeFieldEnd() + if self.principal_type is not None: + oprot.writeFieldBegin('principal_type', TType.I32, 3) + oprot.writeI32(self.principal_type) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.role_name) + value = (value * 31) ^ hash(self.principal_name) + value = (value * 31) ^ hash(self.principal_type) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class revoke_role_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.BOOL, 'success', None, None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def 
write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('revoke_role_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class list_roles_args: + """ + Attributes: + - principal_name + - principal_type + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'principal_name', None, None, ), # 1 + (2, TType.I32, 'principal_type', None, None, ), # 2 + ) + + def __init__(self, principal_name=None, principal_type=None,): + self.principal_name = principal_name + self.principal_type = principal_type + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.principal_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.principal_type = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('list_roles_args') + if self.principal_name is not None: + oprot.writeFieldBegin('principal_name', TType.STRING, 1) + oprot.writeString(self.principal_name) + oprot.writeFieldEnd() + if self.principal_type is not None: + oprot.writeFieldBegin('principal_type', TType.I32, 2) + oprot.writeI32(self.principal_type) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.principal_name) + value = (value * 31) ^ hash(self.principal_type) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class list_roles_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRUCT,(Role, Role.thrift_spec)), None, 
), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1085, _size1082) = iprot.readListBegin() + for _i1086 in xrange(_size1082): + _elem1087 = Role() + _elem1087.read(iprot) + self.success.append(_elem1087) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('list_roles_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1088 in self.success: + iter1088.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class grant_revoke_role_args: + """ + Attributes: + - request + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'request', (GrantRevokeRoleRequest, GrantRevokeRoleRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, request=None,): + self.request = request + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = GrantRevokeRoleRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('grant_revoke_role_args') + if self.request is not None: + 
oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.request) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class grant_revoke_role_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (GrantRevokeRoleResponse, GrantRevokeRoleResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GrantRevokeRoleResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('grant_revoke_role_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_principals_in_role_args: + """ + Attributes: + - request + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'request', (GetPrincipalsInRoleRequest, GetPrincipalsInRoleRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, request=None,): + self.request = request + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if 
ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = GetPrincipalsInRoleRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_principals_in_role_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.request) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_principals_in_role_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (GetPrincipalsInRoleResponse, GetPrincipalsInRoleResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetPrincipalsInRoleResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_principals_in_role_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_role_grants_for_principal_args: """ Attributes: - - role_name + 
- request """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'role_name', None, None, ), # 1 + (1, TType.STRUCT, 'request', (GetRoleGrantsForPrincipalRequest, GetRoleGrantsForPrincipalRequest.thrift_spec), None, ), # 1 ) - def __init__(self, role_name=None,): - self.role_name = role_name + def __init__(self, request=None,): + self.request = request def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -31156,8 +32657,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.role_name = iprot.readString() + if ftype == TType.STRUCT: + self.request = GetRoleGrantsForPrincipalRequest() + self.request.read(iprot) else: iprot.skip(ftype) else: @@ -31169,10 +32671,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('drop_role_args') - if self.role_name is not None: - oprot.writeFieldBegin('role_name', TType.STRING, 1) - oprot.writeString(self.role_name) + oprot.writeStructBegin('get_role_grants_for_principal_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -31183,7 +32685,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.role_name) + value = (value * 31) ^ hash(self.request) return value def __repr__(self): @@ -31197,7 +32699,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class drop_role_result: +class get_role_grants_for_principal_result: """ Attributes: - success @@ -31205,7 +32707,7 @@ class drop_role_result: """ thrift_spec = ( - (0, TType.BOOL, 'success', None, None, ), # 0 + (0, TType.STRUCT, 'success', (GetRoleGrantsForPrincipalResponse, GetRoleGrantsForPrincipalResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -31223,8 +32725,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.BOOL: - self.success = iprot.readBool() + if ftype == TType.STRUCT: + self.success = GetRoleGrantsForPrincipalResponse() + self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: @@ -31242,10 +32745,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('drop_role_result') + oprot.writeStructBegin('get_role_grants_for_principal_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.BOOL, 0) - oprot.writeBool(self.success) + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -31275,11 +32778,26 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_role_names_args: +class get_privilege_set_args: + """ + Attributes: + - hiveObject + - user_name + - group_names + """ thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'hiveObject', (HiveObjectRef, HiveObjectRef.thrift_spec), None, 
), # 1 + (2, TType.STRING, 'user_name', None, None, ), # 2 + (3, TType.LIST, 'group_names', (TType.STRING,None), None, ), # 3 ) + def __init__(self, hiveObject=None, user_name=None, group_names=None,): + self.hiveObject = hiveObject + self.user_name = user_name + self.group_names = group_names + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -31289,6 +32807,27 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRUCT: + self.hiveObject = HiveObjectRef() + self.hiveObject.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.user_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.group_names = [] + (_etype1092, _size1089) = iprot.readListBegin() + for _i1093 in xrange(_size1089): + _elem1094 = iprot.readString() + self.group_names.append(_elem1094) + iprot.readListEnd() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -31298,7 +32837,22 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_role_names_args') + oprot.writeStructBegin('get_privilege_set_args') + if self.hiveObject is not None: + oprot.writeFieldBegin('hiveObject', TType.STRUCT, 1) + self.hiveObject.write(oprot) + oprot.writeFieldEnd() + if self.user_name is not None: + oprot.writeFieldBegin('user_name', TType.STRING, 2) + oprot.writeString(self.user_name) + oprot.writeFieldEnd() + if self.group_names is not None: + oprot.writeFieldBegin('group_names', TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.group_names)) + for iter1095 in self.group_names: + oprot.writeString(iter1095) + oprot.writeListEnd() + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -31308,6 +32862,9 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.hiveObject) + value = (value * 31) ^ hash(self.user_name) + value = (value * 31) ^ hash(self.group_names) return value def __repr__(self): @@ -31321,7 +32878,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_role_names_result: +class get_privilege_set_result: """ Attributes: - success @@ -31329,7 +32886,7 @@ class get_role_names_result: """ thrift_spec = ( - (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (0, TType.STRUCT, 'success', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -31347,13 +32904,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.LIST: - self.success = [] - (_etype1078, _size1075) = iprot.readListBegin() - for _i1079 in xrange(_size1075): - _elem1080 = iprot.readString() - self.success.append(_elem1080) - iprot.readListEnd() + if ftype == TType.STRUCT: + self.success = PrincipalPrivilegeSet() + self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: @@ -31371,13 +32924,10 @@ def write(self, oprot): if oprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_role_names_result') + oprot.writeStructBegin('get_privilege_set_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.LIST, 0) - oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1081 in self.success: - oprot.writeString(iter1081) - oprot.writeListEnd() + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -31407,34 +32957,25 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class grant_role_args: +class list_privileges_args: """ Attributes: - - role_name - principal_name - principal_type - - grantor - - grantorType - - grant_option + - hiveObject """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'role_name', None, None, ), # 1 - (2, TType.STRING, 'principal_name', None, None, ), # 2 - (3, TType.I32, 'principal_type', None, None, ), # 3 - (4, TType.STRING, 'grantor', None, None, ), # 4 - (5, TType.I32, 'grantorType', None, None, ), # 5 - (6, TType.BOOL, 'grant_option', None, None, ), # 6 + (1, TType.STRING, 'principal_name', None, None, ), # 1 + (2, TType.I32, 'principal_type', None, None, ), # 2 + (3, TType.STRUCT, 'hiveObject', (HiveObjectRef, HiveObjectRef.thrift_spec), None, ), # 3 ) - def __init__(self, role_name=None, principal_name=None, principal_type=None, grantor=None, grantorType=None, grant_option=None,): - self.role_name = role_name + def __init__(self, principal_name=None, principal_type=None, hiveObject=None,): self.principal_name = principal_name self.principal_type = principal_type - self.grantor = grantor - self.grantorType = grantorType - self.grant_option = grant_option + self.hiveObject = hiveObject def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -31446,33 +32987,19 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.role_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: if ftype == TType.STRING: self.principal_name = iprot.readString() else: iprot.skip(ftype) - elif fid == 3: + elif fid == 2: if ftype == TType.I32: self.principal_type = iprot.readI32() else: iprot.skip(ftype) - elif fid == 4: - if ftype == TType.STRING: - self.grantor = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 5: - if ftype == TType.I32: - self.grantorType = iprot.readI32() - else: - iprot.skip(ftype) - elif fid == 6: - if ftype == TType.BOOL: - self.grant_option = iprot.readBool() + elif fid == 3: + if ftype == TType.STRUCT: + self.hiveObject = HiveObjectRef() + self.hiveObject.read(iprot) else: iprot.skip(ftype) else: @@ -31484,30 +33011,18 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_role_args') - if self.role_name is not None: - oprot.writeFieldBegin('role_name', TType.STRING, 1) - oprot.writeString(self.role_name) - oprot.writeFieldEnd() + oprot.writeStructBegin('list_privileges_args') if self.principal_name 
is not None: - oprot.writeFieldBegin('principal_name', TType.STRING, 2) + oprot.writeFieldBegin('principal_name', TType.STRING, 1) oprot.writeString(self.principal_name) oprot.writeFieldEnd() if self.principal_type is not None: - oprot.writeFieldBegin('principal_type', TType.I32, 3) + oprot.writeFieldBegin('principal_type', TType.I32, 2) oprot.writeI32(self.principal_type) oprot.writeFieldEnd() - if self.grantor is not None: - oprot.writeFieldBegin('grantor', TType.STRING, 4) - oprot.writeString(self.grantor) - oprot.writeFieldEnd() - if self.grantorType is not None: - oprot.writeFieldBegin('grantorType', TType.I32, 5) - oprot.writeI32(self.grantorType) - oprot.writeFieldEnd() - if self.grant_option is not None: - oprot.writeFieldBegin('grant_option', TType.BOOL, 6) - oprot.writeBool(self.grant_option) + if self.hiveObject is not None: + oprot.writeFieldBegin('hiveObject', TType.STRUCT, 3) + self.hiveObject.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -31518,12 +33033,9 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.role_name) value = (value * 31) ^ hash(self.principal_name) value = (value * 31) ^ hash(self.principal_type) - value = (value * 31) ^ hash(self.grantor) - value = (value * 31) ^ hash(self.grantorType) - value = (value * 31) ^ hash(self.grant_option) + value = (value * 31) ^ hash(self.hiveObject) return value def __repr__(self): @@ -31537,7 +33049,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class grant_role_result: +class list_privileges_result: """ Attributes: - success @@ -31545,7 +33057,7 @@ class grant_role_result: """ thrift_spec = ( - (0, TType.BOOL, 'success', None, None, ), # 0 + (0, TType.LIST, 'success', (TType.STRUCT,(HiveObjectPrivilege, HiveObjectPrivilege.thrift_spec)), None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -31563,8 +33075,14 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.BOOL: - self.success = iprot.readBool() + if ftype == TType.LIST: + self.success = [] + (_etype1099, _size1096) = iprot.readListBegin() + for _i1100 in xrange(_size1096): + _elem1101 = HiveObjectPrivilege() + _elem1101.read(iprot) + self.success.append(_elem1101) + iprot.readListEnd() else: iprot.skip(ftype) elif fid == 1: @@ -31582,10 +33100,13 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_role_result') + oprot.writeStructBegin('list_privileges_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.BOOL, 0) - oprot.writeBool(self.success) + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1102 in self.success: + iter1102.write(oprot) + oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -31615,25 +33136,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class revoke_role_args: +class grant_privileges_args: """ Attributes: - - role_name - - principal_name - - principal_type + - privileges """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'role_name', None, None, ), # 1 - (2, TType.STRING, 'principal_name', None, None, ), # 2 - (3, TType.I32, 'principal_type', None, 
None, ), # 3 + (1, TType.STRUCT, 'privileges', (PrivilegeBag, PrivilegeBag.thrift_spec), None, ), # 1 ) - def __init__(self, role_name=None, principal_name=None, principal_type=None,): - self.role_name = role_name - self.principal_name = principal_name - self.principal_type = principal_type + def __init__(self, privileges=None,): + self.privileges = privileges def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -31645,18 +33160,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.role_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.principal_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 3: - if ftype == TType.I32: - self.principal_type = iprot.readI32() + if ftype == TType.STRUCT: + self.privileges = PrivilegeBag() + self.privileges.read(iprot) else: iprot.skip(ftype) else: @@ -31668,18 +33174,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('revoke_role_args') - if self.role_name is not None: - oprot.writeFieldBegin('role_name', TType.STRING, 1) - oprot.writeString(self.role_name) - oprot.writeFieldEnd() - if self.principal_name is not None: - oprot.writeFieldBegin('principal_name', TType.STRING, 2) - oprot.writeString(self.principal_name) - oprot.writeFieldEnd() - if self.principal_type is not None: - oprot.writeFieldBegin('principal_type', TType.I32, 3) - oprot.writeI32(self.principal_type) + oprot.writeStructBegin('grant_privileges_args') + if self.privileges is not None: + oprot.writeFieldBegin('privileges', TType.STRUCT, 1) + self.privileges.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -31690,9 +33188,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.role_name) - value = (value * 31) ^ hash(self.principal_name) - value = (value * 31) ^ hash(self.principal_type) + value = (value * 31) ^ hash(self.privileges) return value def __repr__(self): @@ -31706,7 +33202,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class revoke_role_result: +class grant_privileges_result: """ Attributes: - success @@ -31751,7 +33247,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('revoke_role_result') + oprot.writeStructBegin('grant_privileges_result') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 0) oprot.writeBool(self.success) @@ -31784,22 +33280,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class list_roles_args: +class revoke_privileges_args: """ Attributes: - - principal_name - - principal_type + - privileges """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'principal_name', None, None, ), # 1 - (2, TType.I32, 'principal_type', None, None, ), # 2 + (1, TType.STRUCT, 'privileges', (PrivilegeBag, PrivilegeBag.thrift_spec), None, ), # 1 ) - def __init__(self, principal_name=None, 
principal_type=None,): - self.principal_name = principal_name - self.principal_type = principal_type + def __init__(self, privileges=None,): + self.privileges = privileges def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -31811,13 +33304,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.principal_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.I32: - self.principal_type = iprot.readI32() + if ftype == TType.STRUCT: + self.privileges = PrivilegeBag() + self.privileges.read(iprot) else: iprot.skip(ftype) else: @@ -31829,14 +33318,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('list_roles_args') - if self.principal_name is not None: - oprot.writeFieldBegin('principal_name', TType.STRING, 1) - oprot.writeString(self.principal_name) - oprot.writeFieldEnd() - if self.principal_type is not None: - oprot.writeFieldBegin('principal_type', TType.I32, 2) - oprot.writeI32(self.principal_type) + oprot.writeStructBegin('revoke_privileges_args') + if self.privileges is not None: + oprot.writeFieldBegin('privileges', TType.STRUCT, 1) + self.privileges.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -31847,8 +33332,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.principal_name) - value = (value * 31) ^ hash(self.principal_type) + value = (value * 31) ^ hash(self.privileges) return value def __repr__(self): @@ -31862,7 +33346,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class list_roles_result: +class revoke_privileges_result: """ Attributes: - success @@ -31870,7 +33354,7 @@ class list_roles_result: """ thrift_spec = ( - (0, TType.LIST, 'success', (TType.STRUCT,(Role, Role.thrift_spec)), None, ), # 0 + (0, TType.BOOL, 'success', None, None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -31888,14 +33372,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.LIST: - self.success = [] - (_etype1085, _size1082) = iprot.readListBegin() - for _i1086 in xrange(_size1082): - _elem1087 = Role() - _elem1087.read(iprot) - self.success.append(_elem1087) - iprot.readListEnd() + if ftype == TType.BOOL: + self.success = iprot.readBool() else: iprot.skip(ftype) elif fid == 1: @@ -31913,13 +33391,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('list_roles_result') + oprot.writeStructBegin('revoke_privileges_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.LIST, 0) - oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1088 in self.success: - iter1088.write(oprot) - oprot.writeListEnd() + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -31949,7 +33424,7 @@ def 
__eq__(self, other): def __ne__(self, other): return not (self == other) -class grant_revoke_role_args: +class grant_revoke_privileges_args: """ Attributes: - request @@ -31957,7 +33432,7 @@ class grant_revoke_role_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (GrantRevokeRoleRequest, GrantRevokeRoleRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (GrantRevokePrivilegeRequest, GrantRevokePrivilegeRequest.thrift_spec), None, ), # 1 ) def __init__(self, request=None,): @@ -31974,7 +33449,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.request = GrantRevokeRoleRequest() + self.request = GrantRevokePrivilegeRequest() self.request.read(iprot) else: iprot.skip(ftype) @@ -31987,7 +33462,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_revoke_role_args') + oprot.writeStructBegin('grant_revoke_privileges_args') if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 1) self.request.write(oprot) @@ -32015,7 +33490,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class grant_revoke_role_result: +class grant_revoke_privileges_result: """ Attributes: - success @@ -32023,7 +33498,7 @@ class grant_revoke_role_result: """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GrantRevokeRoleResponse, GrantRevokeRoleResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (GrantRevokePrivilegeResponse, GrantRevokePrivilegeResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -32042,7 +33517,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = GrantRevokeRoleResponse() + self.success = GrantRevokePrivilegeResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -32061,7 +33536,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_revoke_role_result') + oprot.writeStructBegin('grant_revoke_privileges_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -32094,19 +33569,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_principals_in_role_args: +class set_ugi_args: """ Attributes: - - request + - user_name + - group_names """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (GetPrincipalsInRoleRequest, GetPrincipalsInRoleRequest.thrift_spec), None, ), # 1 + (1, TType.STRING, 'user_name', None, None, ), # 1 + (2, TType.LIST, 'group_names', (TType.STRING,None), None, ), # 2 ) - def __init__(self, request=None,): - self.request = request + def __init__(self, user_name=None, group_names=None,): + self.user_name = user_name + self.group_names = group_names def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -32118,9 +33596,18 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.request = GetPrincipalsInRoleRequest() - 
self.request.read(iprot) + if ftype == TType.STRING: + self.user_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.group_names = [] + (_etype1106, _size1103) = iprot.readListBegin() + for _i1107 in xrange(_size1103): + _elem1108 = iprot.readString() + self.group_names.append(_elem1108) + iprot.readListEnd() else: iprot.skip(ftype) else: @@ -32132,10 +33619,17 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_principals_in_role_args') - if self.request is not None: - oprot.writeFieldBegin('request', TType.STRUCT, 1) - self.request.write(oprot) + oprot.writeStructBegin('set_ugi_args') + if self.user_name is not None: + oprot.writeFieldBegin('user_name', TType.STRING, 1) + oprot.writeString(self.user_name) + oprot.writeFieldEnd() + if self.group_names is not None: + oprot.writeFieldBegin('group_names', TType.LIST, 2) + oprot.writeListBegin(TType.STRING, len(self.group_names)) + for iter1109 in self.group_names: + oprot.writeString(iter1109) + oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -32146,7 +33640,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.request) + value = (value * 31) ^ hash(self.user_name) + value = (value * 31) ^ hash(self.group_names) return value def __repr__(self): @@ -32160,7 +33655,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_principals_in_role_result: +class set_ugi_result: """ Attributes: - success @@ -32168,7 +33663,7 @@ class get_principals_in_role_result: """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GetPrincipalsInRoleResponse, GetPrincipalsInRoleResponse.thrift_spec), None, ), # 0 + (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -32186,9 +33681,13 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRUCT: - self.success = GetPrincipalsInRoleResponse() - self.success.read(iprot) + if ftype == TType.LIST: + self.success = [] + (_etype1113, _size1110) = iprot.readListBegin() + for _i1114 in xrange(_size1110): + _elem1115 = iprot.readString() + self.success.append(_elem1115) + iprot.readListEnd() else: iprot.skip(ftype) elif fid == 1: @@ -32206,10 +33705,13 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_principals_in_role_result') + oprot.writeStructBegin('set_ugi_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1116 in self.success: + oprot.writeString(iter1116) + oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -32239,19 +33741,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_role_grants_for_principal_args: +class get_delegation_token_args: """ Attributes: - - request + - token_owner + - 
renewer_kerberos_principal_name """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (GetRoleGrantsForPrincipalRequest, GetRoleGrantsForPrincipalRequest.thrift_spec), None, ), # 1 + (1, TType.STRING, 'token_owner', None, None, ), # 1 + (2, TType.STRING, 'renewer_kerberos_principal_name', None, None, ), # 2 ) - def __init__(self, request=None,): - self.request = request + def __init__(self, token_owner=None, renewer_kerberos_principal_name=None,): + self.token_owner = token_owner + self.renewer_kerberos_principal_name = renewer_kerberos_principal_name def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -32263,9 +33768,13 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.request = GetRoleGrantsForPrincipalRequest() - self.request.read(iprot) + if ftype == TType.STRING: + self.token_owner = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.renewer_kerberos_principal_name = iprot.readString() else: iprot.skip(ftype) else: @@ -32277,10 +33786,14 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_role_grants_for_principal_args') - if self.request is not None: - oprot.writeFieldBegin('request', TType.STRUCT, 1) - self.request.write(oprot) + oprot.writeStructBegin('get_delegation_token_args') + if self.token_owner is not None: + oprot.writeFieldBegin('token_owner', TType.STRING, 1) + oprot.writeString(self.token_owner) + oprot.writeFieldEnd() + if self.renewer_kerberos_principal_name is not None: + oprot.writeFieldBegin('renewer_kerberos_principal_name', TType.STRING, 2) + oprot.writeString(self.renewer_kerberos_principal_name) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -32291,7 +33804,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.request) + value = (value * 31) ^ hash(self.token_owner) + value = (value * 31) ^ hash(self.renewer_kerberos_principal_name) return value def __repr__(self): @@ -32305,7 +33819,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_role_grants_for_principal_result: +class get_delegation_token_result: """ Attributes: - success @@ -32313,7 +33827,7 @@ class get_role_grants_for_principal_result: """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GetRoleGrantsForPrincipalResponse, GetRoleGrantsForPrincipalResponse.thrift_spec), None, ), # 0 + (0, TType.STRING, 'success', None, None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -32331,9 +33845,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRUCT: - self.success = GetRoleGrantsForPrincipalResponse() - self.success.read(iprot) + if ftype == TType.STRING: + self.success = iprot.readString() else: iprot.skip(ftype) elif fid == 1: @@ -32351,10 +33864,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - 
oprot.writeStructBegin('get_role_grants_for_principal_result') + oprot.writeStructBegin('get_delegation_token_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeFieldBegin('success', TType.STRING, 0) + oprot.writeString(self.success) oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -32384,25 +33897,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_privilege_set_args: +class renew_delegation_token_args: """ Attributes: - - hiveObject - - user_name - - group_names + - token_str_form """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'hiveObject', (HiveObjectRef, HiveObjectRef.thrift_spec), None, ), # 1 - (2, TType.STRING, 'user_name', None, None, ), # 2 - (3, TType.LIST, 'group_names', (TType.STRING,None), None, ), # 3 + (1, TType.STRING, 'token_str_form', None, None, ), # 1 ) - def __init__(self, hiveObject=None, user_name=None, group_names=None,): - self.hiveObject = hiveObject - self.user_name = user_name - self.group_names = group_names + def __init__(self, token_str_form=None,): + self.token_str_form = token_str_form def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -32414,24 +33921,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.hiveObject = HiveObjectRef() - self.hiveObject.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: if ftype == TType.STRING: - self.user_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 3: - if ftype == TType.LIST: - self.group_names = [] - (_etype1092, _size1089) = iprot.readListBegin() - for _i1093 in xrange(_size1089): - _elem1094 = iprot.readString() - self.group_names.append(_elem1094) - iprot.readListEnd() + self.token_str_form = iprot.readString() else: iprot.skip(ftype) else: @@ -32443,21 +33934,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_privilege_set_args') - if self.hiveObject is not None: - oprot.writeFieldBegin('hiveObject', TType.STRUCT, 1) - self.hiveObject.write(oprot) - oprot.writeFieldEnd() - if self.user_name is not None: - oprot.writeFieldBegin('user_name', TType.STRING, 2) - oprot.writeString(self.user_name) - oprot.writeFieldEnd() - if self.group_names is not None: - oprot.writeFieldBegin('group_names', TType.LIST, 3) - oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1095 in self.group_names: - oprot.writeString(iter1095) - oprot.writeListEnd() + oprot.writeStructBegin('renew_delegation_token_args') + if self.token_str_form is not None: + oprot.writeFieldBegin('token_str_form', TType.STRING, 1) + oprot.writeString(self.token_str_form) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -32468,9 +33948,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.hiveObject) - value = (value * 31) ^ hash(self.user_name) - value = (value * 31) ^ hash(self.group_names) + value = (value * 31) ^ hash(self.token_str_form) return value def __repr__(self): @@ -32484,7 +33962,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self 
== other) -class get_privilege_set_result: +class renew_delegation_token_result: """ Attributes: - success @@ -32492,7 +33970,7 @@ class get_privilege_set_result: """ thrift_spec = ( - (0, TType.STRUCT, 'success', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 0 + (0, TType.I64, 'success', None, None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -32510,9 +33988,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRUCT: - self.success = PrincipalPrivilegeSet() - self.success.read(iprot) + if ftype == TType.I64: + self.success = iprot.readI64() else: iprot.skip(ftype) elif fid == 1: @@ -32530,10 +34007,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_privilege_set_result') + oprot.writeStructBegin('renew_delegation_token_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeFieldBegin('success', TType.I64, 0) + oprot.writeI64(self.success) oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -32563,25 +34040,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class list_privileges_args: +class cancel_delegation_token_args: """ Attributes: - - principal_name - - principal_type - - hiveObject + - token_str_form """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'principal_name', None, None, ), # 1 - (2, TType.I32, 'principal_type', None, None, ), # 2 - (3, TType.STRUCT, 'hiveObject', (HiveObjectRef, HiveObjectRef.thrift_spec), None, ), # 3 + (1, TType.STRING, 'token_str_form', None, None, ), # 1 ) - def __init__(self, principal_name=None, principal_type=None, hiveObject=None,): - self.principal_name = principal_name - self.principal_type = principal_type - self.hiveObject = hiveObject + def __init__(self, token_str_form=None,): + self.token_str_form = token_str_form def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -32594,18 +34065,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.principal_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.I32: - self.principal_type = iprot.readI32() - else: - iprot.skip(ftype) - elif fid == 3: - if ftype == TType.STRUCT: - self.hiveObject = HiveObjectRef() - self.hiveObject.read(iprot) + self.token_str_form = iprot.readString() else: iprot.skip(ftype) else: @@ -32617,18 +34077,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('list_privileges_args') - if self.principal_name is not None: - oprot.writeFieldBegin('principal_name', TType.STRING, 1) - oprot.writeString(self.principal_name) - oprot.writeFieldEnd() - if self.principal_type is not None: - oprot.writeFieldBegin('principal_type', TType.I32, 2) - oprot.writeI32(self.principal_type) - oprot.writeFieldEnd() - if self.hiveObject is not None: - 
oprot.writeFieldBegin('hiveObject', TType.STRUCT, 3) - self.hiveObject.write(oprot) + oprot.writeStructBegin('cancel_delegation_token_args') + if self.token_str_form is not None: + oprot.writeFieldBegin('token_str_form', TType.STRING, 1) + oprot.writeString(self.token_str_form) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -32639,9 +34091,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.principal_name) - value = (value * 31) ^ hash(self.principal_type) - value = (value * 31) ^ hash(self.hiveObject) + value = (value * 31) ^ hash(self.token_str_form) return value def __repr__(self): @@ -32655,20 +34105,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class list_privileges_result: +class cancel_delegation_token_result: """ Attributes: - - success - o1 """ thrift_spec = ( - (0, TType.LIST, 'success', (TType.STRUCT,(HiveObjectPrivilege, HiveObjectPrivilege.thrift_spec)), None, ), # 0 + None, # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None, o1=None,): - self.success = success + def __init__(self, o1=None,): self.o1 = o1 def read(self, iprot): @@ -32680,18 +34128,7 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.LIST: - self.success = [] - (_etype1099, _size1096) = iprot.readListBegin() - for _i1100 in xrange(_size1096): - _elem1101 = HiveObjectPrivilege() - _elem1101.read(iprot) - self.success.append(_elem1101) - iprot.readListEnd() - else: - iprot.skip(ftype) - elif fid == 1: + if fid == 1: if ftype == TType.STRUCT: self.o1 = MetaException() self.o1.read(iprot) @@ -32706,14 +34143,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('list_privileges_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.LIST, 0) - oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1102 in self.success: - iter1102.write(oprot) - oprot.writeListEnd() - oprot.writeFieldEnd() + oprot.writeStructBegin('cancel_delegation_token_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -32727,7 +34157,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) return value @@ -32742,19 +34171,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class grant_privileges_args: +class add_token_args: """ Attributes: - - privileges + - token_identifier + - delegation_token """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'privileges', (PrivilegeBag, PrivilegeBag.thrift_spec), None, ), # 1 + (1, TType.STRING, 'token_identifier', None, None, ), # 1 + (2, TType.STRING, 'delegation_token', None, None, ), # 2 ) - def __init__(self, privileges=None,): - self.privileges = privileges + def __init__(self, token_identifier=None, delegation_token=None,): + self.token_identifier = token_identifier + self.delegation_token = delegation_token def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -32766,9 +34198,13 @@ def 
read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.privileges = PrivilegeBag() - self.privileges.read(iprot) + if ftype == TType.STRING: + self.token_identifier = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.delegation_token = iprot.readString() else: iprot.skip(ftype) else: @@ -32780,10 +34216,14 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_privileges_args') - if self.privileges is not None: - oprot.writeFieldBegin('privileges', TType.STRUCT, 1) - self.privileges.write(oprot) + oprot.writeStructBegin('add_token_args') + if self.token_identifier is not None: + oprot.writeFieldBegin('token_identifier', TType.STRING, 1) + oprot.writeString(self.token_identifier) + oprot.writeFieldEnd() + if self.delegation_token is not None: + oprot.writeFieldBegin('delegation_token', TType.STRING, 2) + oprot.writeString(self.delegation_token) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -32794,7 +34234,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.privileges) + value = (value * 31) ^ hash(self.token_identifier) + value = (value * 31) ^ hash(self.delegation_token) return value def __repr__(self): @@ -32808,21 +34249,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class grant_privileges_result: +class add_token_result: """ Attributes: - success - - o1 """ thrift_spec = ( (0, TType.BOOL, 'success', None, None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None, o1=None,): + def __init__(self, success=None,): self.success = success - self.o1 = o1 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -32838,12 +34276,6 @@ def read(self, iprot): self.success = iprot.readBool() else: iprot.skip(ftype) - elif fid == 1: - if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -32853,15 +34285,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_privileges_result') + oprot.writeStructBegin('add_token_result') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 0) oprot.writeBool(self.success) oprot.writeFieldEnd() - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -32872,7 +34300,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) - value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -32886,19 +34313,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class revoke_privileges_args: +class remove_token_args: """ Attributes: - - privileges + - token_identifier """ thrift_spec = ( None, # 0 - (1, 
TType.STRUCT, 'privileges', (PrivilegeBag, PrivilegeBag.thrift_spec), None, ), # 1 + (1, TType.STRING, 'token_identifier', None, None, ), # 1 ) - def __init__(self, privileges=None,): - self.privileges = privileges + def __init__(self, token_identifier=None,): + self.token_identifier = token_identifier def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -32910,9 +34337,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.privileges = PrivilegeBag() - self.privileges.read(iprot) + if ftype == TType.STRING: + self.token_identifier = iprot.readString() else: iprot.skip(ftype) else: @@ -32924,10 +34350,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('revoke_privileges_args') - if self.privileges is not None: - oprot.writeFieldBegin('privileges', TType.STRUCT, 1) - self.privileges.write(oprot) + oprot.writeStructBegin('remove_token_args') + if self.token_identifier is not None: + oprot.writeFieldBegin('token_identifier', TType.STRING, 1) + oprot.writeString(self.token_identifier) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -32938,7 +34364,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.privileges) + value = (value * 31) ^ hash(self.token_identifier) return value def __repr__(self): @@ -32952,21 +34378,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class revoke_privileges_result: +class remove_token_result: """ Attributes: - success - - o1 """ thrift_spec = ( (0, TType.BOOL, 'success', None, None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None, o1=None,): + def __init__(self, success=None,): self.success = success - self.o1 = o1 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -32982,12 +34405,6 @@ def read(self, iprot): self.success = iprot.readBool() else: iprot.skip(ftype) - elif fid == 1: - if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -32997,15 +34414,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('revoke_privileges_result') + oprot.writeStructBegin('remove_token_result') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 0) oprot.writeBool(self.success) oprot.writeFieldEnd() - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33016,7 +34429,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) - value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -33030,19 +34442,19 @@ def __eq__(self, 
other): def __ne__(self, other): return not (self == other) -class grant_revoke_privileges_args: +class get_token_args: """ Attributes: - - request + - token_identifier """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'request', (GrantRevokePrivilegeRequest, GrantRevokePrivilegeRequest.thrift_spec), None, ), # 1 + (1, TType.STRING, 'token_identifier', None, None, ), # 1 ) - def __init__(self, request=None,): - self.request = request + def __init__(self, token_identifier=None,): + self.token_identifier = token_identifier def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33054,9 +34466,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.request = GrantRevokePrivilegeRequest() - self.request.read(iprot) + if ftype == TType.STRING: + self.token_identifier = iprot.readString() else: iprot.skip(ftype) else: @@ -33068,10 +34479,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_revoke_privileges_args') - if self.request is not None: - oprot.writeFieldBegin('request', TType.STRUCT, 1) - self.request.write(oprot) + oprot.writeStructBegin('get_token_args') + if self.token_identifier is not None: + oprot.writeFieldBegin('token_identifier', TType.STRING, 1) + oprot.writeString(self.token_identifier) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33082,7 +34493,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.request) + value = (value * 31) ^ hash(self.token_identifier) return value def __repr__(self): @@ -33096,21 +34507,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class grant_revoke_privileges_result: +class get_token_result: """ Attributes: - success - - o1 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GrantRevokePrivilegeResponse, GrantRevokePrivilegeResponse.thrift_spec), None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (0, TType.STRING, 'success', None, None, ), # 0 ) - def __init__(self, success=None, o1=None,): + def __init__(self, success=None,): self.success = success - self.o1 = o1 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33122,15 +34530,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRUCT: - self.success = GrantRevokePrivilegeResponse() - self.success.read(iprot) - else: - iprot.skip(ftype) - elif fid == 1: - if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) + if ftype == TType.STRING: + self.success = iprot.readString() else: iprot.skip(ftype) else: @@ -33142,14 +34543,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('grant_revoke_privileges_result') + oprot.writeStructBegin('get_token_result') if self.success is not None: - 
oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) - oprot.writeFieldEnd() - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) + oprot.writeFieldBegin('success', TType.STRING, 0) + oprot.writeString(self.success) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33161,7 +34558,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) - value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -33175,23 +34571,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class set_ugi_args: - """ - Attributes: - - user_name - - group_names - """ +class get_all_token_identifiers_args: thrift_spec = ( - None, # 0 - (1, TType.STRING, 'user_name', None, None, ), # 1 - (2, TType.LIST, 'group_names', (TType.STRING,None), None, ), # 2 ) - def __init__(self, user_name=None, group_names=None,): - self.user_name = user_name - self.group_names = group_names - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -33201,21 +34585,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRING: - self.user_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.LIST: - self.group_names = [] - (_etype1106, _size1103) = iprot.readListBegin() - for _i1107 in xrange(_size1103): - _elem1108 = iprot.readString() - self.group_names.append(_elem1108) - iprot.readListEnd() - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -33225,18 +34594,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('set_ugi_args') - if self.user_name is not None: - oprot.writeFieldBegin('user_name', TType.STRING, 1) - oprot.writeString(self.user_name) - oprot.writeFieldEnd() - if self.group_names is not None: - oprot.writeFieldBegin('group_names', TType.LIST, 2) - oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1109 in self.group_names: - oprot.writeString(iter1109) - oprot.writeListEnd() - oprot.writeFieldEnd() + oprot.writeStructBegin('get_all_token_identifiers_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -33246,8 +34604,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.user_name) - value = (value * 31) ^ hash(self.group_names) return value def __repr__(self): @@ -33261,21 +34617,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class set_ugi_result: +class get_all_token_identifiers_result: """ Attributes: - success - - o1 """ thrift_spec = ( (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None, o1=None,): + def __init__(self, success=None,): self.success = success - self.o1 = o1 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not 
None and fastbinary is not None: @@ -33289,19 +34642,13 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1113, _size1110) = iprot.readListBegin() - for _i1114 in xrange(_size1110): - _elem1115 = iprot.readString() - self.success.append(_elem1115) + (_etype1120, _size1117) = iprot.readListBegin() + for _i1121 in xrange(_size1117): + _elem1122 = iprot.readString() + self.success.append(_elem1122) iprot.readListEnd() else: iprot.skip(ftype) - elif fid == 1: - if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -33311,18 +34658,14 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('set_ugi_result') + oprot.writeStructBegin('get_all_token_identifiers_result') if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1116 in self.success: - oprot.writeString(iter1116) + for iter1123 in self.success: + oprot.writeString(iter1123) oprot.writeListEnd() oprot.writeFieldEnd() - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33333,7 +34676,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) - value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -33347,22 +34689,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_delegation_token_args: +class add_master_key_args: """ Attributes: - - token_owner - - renewer_kerberos_principal_name + - key """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'token_owner', None, None, ), # 1 - (2, TType.STRING, 'renewer_kerberos_principal_name', None, None, ), # 2 + (1, TType.STRING, 'key', None, None, ), # 1 ) - def __init__(self, token_owner=None, renewer_kerberos_principal_name=None,): - self.token_owner = token_owner - self.renewer_kerberos_principal_name = renewer_kerberos_principal_name + def __init__(self, key=None,): + self.key = key def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33375,12 +34714,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.token_owner = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.renewer_kerberos_principal_name = iprot.readString() + self.key = iprot.readString() else: iprot.skip(ftype) else: @@ -33392,14 +34726,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_delegation_token_args') - if self.token_owner is not None: - oprot.writeFieldBegin('token_owner', TType.STRING, 1) - oprot.writeString(self.token_owner) - oprot.writeFieldEnd() - if self.renewer_kerberos_principal_name is not None: - oprot.writeFieldBegin('renewer_kerberos_principal_name', TType.STRING, 2) - 
oprot.writeString(self.renewer_kerberos_principal_name) + oprot.writeStructBegin('add_master_key_args') + if self.key is not None: + oprot.writeFieldBegin('key', TType.STRING, 1) + oprot.writeString(self.key) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33410,8 +34740,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.token_owner) - value = (value * 31) ^ hash(self.renewer_kerberos_principal_name) + value = (value * 31) ^ hash(self.key) return value def __repr__(self): @@ -33425,7 +34754,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_delegation_token_result: +class add_master_key_result: """ Attributes: - success @@ -33433,7 +34762,7 @@ class get_delegation_token_result: """ thrift_spec = ( - (0, TType.STRING, 'success', None, None, ), # 0 + (0, TType.I32, 'success', None, None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -33451,8 +34780,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRING: - self.success = iprot.readString() + if ftype == TType.I32: + self.success = iprot.readI32() else: iprot.skip(ftype) elif fid == 1: @@ -33470,10 +34799,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_delegation_token_result') + oprot.writeStructBegin('add_master_key_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRING, 0) - oprot.writeString(self.success) + oprot.writeFieldBegin('success', TType.I32, 0) + oprot.writeI32(self.success) oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -33503,19 +34832,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class renew_delegation_token_args: +class update_master_key_args: """ Attributes: - - token_str_form + - seq_number + - key """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'token_str_form', None, None, ), # 1 + (1, TType.I32, 'seq_number', None, None, ), # 1 + (2, TType.STRING, 'key', None, None, ), # 2 ) - def __init__(self, token_str_form=None,): - self.token_str_form = token_str_form + def __init__(self, seq_number=None, key=None,): + self.seq_number = seq_number + self.key = key def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33527,8 +34859,13 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: + if ftype == TType.I32: + self.seq_number = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 2: if ftype == TType.STRING: - self.token_str_form = iprot.readString() + self.key = iprot.readString() else: iprot.skip(ftype) else: @@ -33540,10 +34877,14 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('renew_delegation_token_args') - if self.token_str_form is not None: - oprot.writeFieldBegin('token_str_form', TType.STRING, 1) - oprot.writeString(self.token_str_form) + oprot.writeStructBegin('update_master_key_args') + if 
self.seq_number is not None: + oprot.writeFieldBegin('seq_number', TType.I32, 1) + oprot.writeI32(self.seq_number) + oprot.writeFieldEnd() + if self.key is not None: + oprot.writeFieldBegin('key', TType.STRING, 2) + oprot.writeString(self.key) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33554,7 +34895,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.token_str_form) + value = (value * 31) ^ hash(self.seq_number) + value = (value * 31) ^ hash(self.key) return value def __repr__(self): @@ -33568,21 +34910,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class renew_delegation_token_result: +class update_master_key_result: """ Attributes: - - success - o1 + - o2 """ thrift_spec = ( - (0, TType.I64, 'success', None, None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None, o1=None,): - self.success = success + def __init__(self, o1=None, o2=None,): self.o1 = o1 + self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33593,15 +34936,16 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.I64: - self.success = iprot.readI64() + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) else: iprot.skip(ftype) - elif fid == 1: + elif fid == 2: if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) + self.o2 = MetaException() + self.o2.read(iprot) else: iprot.skip(ftype) else: @@ -33613,15 +34957,15 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('renew_delegation_token_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.I64, 0) - oprot.writeI64(self.success) - oprot.writeFieldEnd() + oprot.writeStructBegin('update_master_key_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33631,8 +34975,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -33646,19 +34990,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class cancel_delegation_token_args: +class remove_master_key_args: """ Attributes: - - token_str_form + - key_seq """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'token_str_form', None, None, ), # 1 + (1, TType.I32, 'key_seq', None, None, ), # 1 ) - def __init__(self, token_str_form=None,): - self.token_str_form = token_str_form + def __init__(self, key_seq=None,): + self.key_seq = key_seq def read(self, iprot): if iprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33670,8 +35014,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.token_str_form = iprot.readString() + if ftype == TType.I32: + self.key_seq = iprot.readI32() else: iprot.skip(ftype) else: @@ -33683,10 +35027,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('cancel_delegation_token_args') - if self.token_str_form is not None: - oprot.writeFieldBegin('token_str_form', TType.STRING, 1) - oprot.writeString(self.token_str_form) + oprot.writeStructBegin('remove_master_key_args') + if self.key_seq is not None: + oprot.writeFieldBegin('key_seq', TType.I32, 1) + oprot.writeI32(self.key_seq) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33697,7 +35041,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.token_str_form) + value = (value * 31) ^ hash(self.key_seq) return value def __repr__(self): @@ -33711,19 +35055,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class cancel_delegation_token_result: +class remove_master_key_result: """ Attributes: - - o1 + - success """ thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (0, TType.BOOL, 'success', None, None, ), # 0 ) - def __init__(self, o1=None,): - self.o1 = o1 + def __init__(self, success=None,): + self.success = success def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33734,10 +35077,9 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() else: iprot.skip(ftype) else: @@ -33749,10 +35091,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('cancel_delegation_token_result') - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) + oprot.writeStructBegin('remove_master_key_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33763,7 +35105,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.success) return value def __repr__(self): @@ -33777,23 +35119,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class add_token_args: - """ - Attributes: - - token_identifier - - delegation_token - """ +class get_master_keys_args: thrift_spec = ( - None, # 0 - (1, TType.STRING, 'token_identifier', None, None, ), # 1 - (2, TType.STRING, 'delegation_token', None, None, ), # 2 ) - def 
__init__(self, token_identifier=None, delegation_token=None,): - self.token_identifier = token_identifier - self.delegation_token = delegation_token - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -33803,16 +35133,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRING: - self.token_identifier = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.delegation_token = iprot.readString() - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -33822,15 +35142,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('add_token_args') - if self.token_identifier is not None: - oprot.writeFieldBegin('token_identifier', TType.STRING, 1) - oprot.writeString(self.token_identifier) - oprot.writeFieldEnd() - if self.delegation_token is not None: - oprot.writeFieldBegin('delegation_token', TType.STRING, 2) - oprot.writeString(self.delegation_token) - oprot.writeFieldEnd() + oprot.writeStructBegin('get_master_keys_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -33840,8 +35152,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.token_identifier) - value = (value * 31) ^ hash(self.delegation_token) return value def __repr__(self): @@ -33855,14 +35165,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class add_token_result: +class get_master_keys_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.BOOL, 'success', None, None, ), # 0 + (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 ) def __init__(self, success=None,): @@ -33878,8 +35188,13 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.BOOL: - self.success = iprot.readBool() + if ftype == TType.LIST: + self.success = [] + (_etype1127, _size1124) = iprot.readListBegin() + for _i1128 in xrange(_size1124): + _elem1129 = iprot.readString() + self.success.append(_elem1129) + iprot.readListEnd() else: iprot.skip(ftype) else: @@ -33891,10 +35206,13 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('add_token_result') + oprot.writeStructBegin('get_master_keys_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.BOOL, 0) - oprot.writeBool(self.success) + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1130 in self.success: + oprot.writeString(iter1130) + oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33919,20 +35237,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class remove_token_args: - """ - Attributes: - - token_identifier - """ +class get_open_txns_args: thrift_spec = ( - None, # 0 - (1, TType.STRING, 
'token_identifier', None, None, ), # 1 ) - def __init__(self, token_identifier=None,): - self.token_identifier = token_identifier - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -33942,11 +35251,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRING: - self.token_identifier = iprot.readString() - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -33956,11 +35260,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('remove_token_args') - if self.token_identifier is not None: - oprot.writeFieldBegin('token_identifier', TType.STRING, 1) - oprot.writeString(self.token_identifier) - oprot.writeFieldEnd() + oprot.writeStructBegin('get_open_txns_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -33970,7 +35270,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.token_identifier) return value def __repr__(self): @@ -33984,14 +35283,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class remove_token_result: +class get_open_txns_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.BOOL, 'success', None, None, ), # 0 + (0, TType.STRUCT, 'success', (GetOpenTxnsResponse, GetOpenTxnsResponse.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -34007,8 +35306,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.BOOL: - self.success = iprot.readBool() + if ftype == TType.STRUCT: + self.success = GetOpenTxnsResponse() + self.success.read(iprot) else: iprot.skip(ftype) else: @@ -34020,10 +35320,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('remove_token_result') + oprot.writeStructBegin('get_open_txns_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.BOOL, 0) - oprot.writeBool(self.success) + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34048,20 +35348,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_token_args: - """ - Attributes: - - token_identifier - """ +class get_open_txns_info_args: thrift_spec = ( - None, # 0 - (1, TType.STRING, 'token_identifier', None, None, ), # 1 ) - def __init__(self, token_identifier=None,): - self.token_identifier = token_identifier - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -34071,11 +35362,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == 
TType.STRING: - self.token_identifier = iprot.readString() - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34085,11 +35371,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_token_args') - if self.token_identifier is not None: - oprot.writeFieldBegin('token_identifier', TType.STRING, 1) - oprot.writeString(self.token_identifier) - oprot.writeFieldEnd() + oprot.writeStructBegin('get_open_txns_info_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -34099,7 +35381,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.token_identifier) return value def __repr__(self): @@ -34113,14 +35394,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_token_result: +class get_open_txns_info_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRING, 'success', None, None, ), # 0 + (0, TType.STRUCT, 'success', (GetOpenTxnsInfoResponse, GetOpenTxnsInfoResponse.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -34136,8 +35417,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRING: - self.success = iprot.readString() + if ftype == TType.STRUCT: + self.success = GetOpenTxnsInfoResponse() + self.success.read(iprot) else: iprot.skip(ftype) else: @@ -34149,10 +35431,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_token_result') + oprot.writeStructBegin('get_open_txns_info_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRING, 0) - oprot.writeString(self.success) + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34177,11 +35459,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_all_token_identifiers_args: +class open_txns_args: + """ + Attributes: + - rqst + """ thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (OpenTxnRequest, OpenTxnRequest.thrift_spec), None, ), # 1 ) + def __init__(self, rqst=None,): + self.rqst = rqst + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -34191,6 +35482,12 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = OpenTxnRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34200,7 +35497,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_all_token_identifiers_args') + oprot.writeStructBegin('open_txns_args') + if self.rqst is not None: + 
oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34210,6 +35511,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -34223,14 +35525,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_all_token_identifiers_result: +class open_txns_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (0, TType.STRUCT, 'success', (OpenTxnsResponse, OpenTxnsResponse.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -34246,13 +35548,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.LIST: - self.success = [] - (_etype1120, _size1117) = iprot.readListBegin() - for _i1121 in xrange(_size1117): - _elem1122 = iprot.readString() - self.success.append(_elem1122) - iprot.readListEnd() + if ftype == TType.STRUCT: + self.success = OpenTxnsResponse() + self.success.read(iprot) else: iprot.skip(ftype) else: @@ -34264,13 +35562,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_all_token_identifiers_result') + oprot.writeStructBegin('open_txns_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.LIST, 0) - oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1123 in self.success: - oprot.writeString(iter1123) - oprot.writeListEnd() + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34295,19 +35590,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class add_master_key_args: +class abort_txn_args: """ Attributes: - - key + - rqst """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'key', None, None, ), # 1 + (1, TType.STRUCT, 'rqst', (AbortTxnRequest, AbortTxnRequest.thrift_spec), None, ), # 1 ) - def __init__(self, key=None,): - self.key = key + def __init__(self, rqst=None,): + self.rqst = rqst def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -34319,8 +35614,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.key = iprot.readString() + if ftype == TType.STRUCT: + self.rqst = AbortTxnRequest() + self.rqst.read(iprot) else: iprot.skip(ftype) else: @@ -34332,10 +35628,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('add_master_key_args') - if self.key is not None: - oprot.writeFieldBegin('key', TType.STRING, 1) - oprot.writeString(self.key) + oprot.writeStructBegin('abort_txn_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34346,7 +35642,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.key) + 
value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -34360,20 +35656,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class add_master_key_result: +class abort_txn_result: """ Attributes: - - success - o1 """ thrift_spec = ( - (0, TType.I32, 'success', None, None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None, o1=None,): - self.success = success + def __init__(self, o1=None,): self.o1 = o1 def read(self, iprot): @@ -34385,14 +35679,9 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.I32: - self.success = iprot.readI32() - else: - iprot.skip(ftype) - elif fid == 1: + if fid == 1: if ftype == TType.STRUCT: - self.o1 = MetaException() + self.o1 = NoSuchTxnException() self.o1.read(iprot) else: iprot.skip(ftype) @@ -34405,11 +35694,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('add_master_key_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.I32, 0) - oprot.writeI32(self.success) - oprot.writeFieldEnd() + oprot.writeStructBegin('abort_txn_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -34423,7 +35708,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) return value @@ -34438,22 +35722,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class update_master_key_args: +class abort_txns_args: """ Attributes: - - seq_number - - key + - rqst """ thrift_spec = ( None, # 0 - (1, TType.I32, 'seq_number', None, None, ), # 1 - (2, TType.STRING, 'key', None, None, ), # 2 + (1, TType.STRUCT, 'rqst', (AbortTxnsRequest, AbortTxnsRequest.thrift_spec), None, ), # 1 ) - def __init__(self, seq_number=None, key=None,): - self.seq_number = seq_number - self.key = key + def __init__(self, rqst=None,): + self.rqst = rqst def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -34465,13 +35746,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.I32: - self.seq_number = iprot.readI32() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.key = iprot.readString() + if ftype == TType.STRUCT: + self.rqst = AbortTxnsRequest() + self.rqst.read(iprot) else: iprot.skip(ftype) else: @@ -34483,14 +35760,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('update_master_key_args') - if self.seq_number is not None: - oprot.writeFieldBegin('seq_number', TType.I32, 1) - oprot.writeI32(self.seq_number) - oprot.writeFieldEnd() - if self.key is not None: - oprot.writeFieldBegin('key', TType.STRING, 2) - oprot.writeString(self.key) + 
oprot.writeStructBegin('abort_txns_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34501,8 +35774,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.seq_number) - value = (value * 31) ^ hash(self.key) + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -34516,22 +35788,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class update_master_key_result: +class abort_txns_result: """ Attributes: - o1 - - o2 """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 ) - def __init__(self, o1=None, o2=None,): + def __init__(self, o1=None,): self.o1 = o1 - self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -34544,16 +35813,10 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.o1 = NoSuchObjectException() + self.o1 = NoSuchTxnException() self.o1.read(iprot) else: iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRUCT: - self.o2 = MetaException() - self.o2.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34563,15 +35826,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('update_master_key_result') + oprot.writeStructBegin('abort_txns_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 2) - self.o2.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34582,7 +35841,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.o1) - value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -34596,19 +35854,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class remove_master_key_args: +class commit_txn_args: """ Attributes: - - key_seq + - rqst """ thrift_spec = ( None, # 0 - (1, TType.I32, 'key_seq', None, None, ), # 1 + (1, TType.STRUCT, 'rqst', (CommitTxnRequest, CommitTxnRequest.thrift_spec), None, ), # 1 ) - def __init__(self, key_seq=None,): - self.key_seq = key_seq + def __init__(self, rqst=None,): + self.rqst = rqst def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -34620,8 +35878,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.I32: - self.key_seq = iprot.readI32() + if ftype == TType.STRUCT: + self.rqst = CommitTxnRequest() + self.rqst.read(iprot) else: iprot.skip(ftype) else: @@ -34633,10 +35892,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not 
None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('remove_master_key_args') - if self.key_seq is not None: - oprot.writeFieldBegin('key_seq', TType.I32, 1) - oprot.writeI32(self.key_seq) + oprot.writeStructBegin('commit_txn_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34647,7 +35906,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.key_seq) + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -34661,18 +35920,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class remove_master_key_result: +class commit_txn_result: """ Attributes: - - success + - o1 + - o2 """ thrift_spec = ( - (0, TType.BOOL, 'success', None, None, ), # 0 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None,): - self.success = success + def __init__(self, o1=None, o2=None,): + self.o1 = o1 + self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -34683,9 +35946,16 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.BOOL: - self.success = iprot.readBool() + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnAbortedException() + self.o2.read(iprot) else: iprot.skip(ftype) else: @@ -34697,10 +35967,14 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('remove_master_key_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.BOOL, 0) - oprot.writeBool(self.success) + oprot.writeStructBegin('commit_txn_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34711,7 +35985,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -34725,11 +36000,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_master_keys_args: +class lock_args: + """ + Attributes: + - rqst + """ thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (LockRequest, LockRequest.thrift_spec), None, ), # 1 ) + def __init__(self, rqst=None,): + self.rqst = rqst + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, 
iprot.trans, (self.__class__, self.thrift_spec)) @@ -34739,6 +36023,12 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = LockRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34748,7 +36038,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_master_keys_args') + oprot.writeStructBegin('lock_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34758,6 +36052,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -34771,18 +36066,24 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_master_keys_result: +class lock_result: """ Attributes: - success + - o1 + - o2 """ thrift_spec = ( - (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (0, TType.STRUCT, 'success', (LockResponse, LockResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None,): + def __init__(self, success=None, o1=None, o2=None,): self.success = success + self.o1 = o1 + self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -34794,13 +36095,21 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.LIST: - self.success = [] - (_etype1127, _size1124) = iprot.readListBegin() - for _i1128 in xrange(_size1124): - _elem1129 = iprot.readString() - self.success.append(_elem1129) - iprot.readListEnd() + if ftype == TType.STRUCT: + self.success = LockResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnAbortedException() + self.o2.read(iprot) else: iprot.skip(ftype) else: @@ -34812,13 +36121,18 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_master_keys_result') + oprot.writeStructBegin('lock_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.LIST, 0) - oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1130 in self.success: - oprot.writeString(iter1130) - oprot.writeListEnd() + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() 
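# [editor's note] Every generated write() in this file follows the shape visible
# just above: writeStructBegin(name), then one writeFieldBegin/value/writeFieldEnd
# triple per non-None field, then writeFieldStop() before writeStructEnd().
# Fields left at None are simply omitted, which is how Thrift encodes an unset
# optional field on the wire. A minimal sketch of the same pattern for a
# hypothetical one-field result struct (names illustrative, not from this file):
#   oprot.writeStructBegin('example_result')
#   if self.success is not None:
#     oprot.writeFieldBegin('success', TType.STRUCT, 0)
#     self.success.write(oprot)
#     oprot.writeFieldEnd()
#   oprot.writeFieldStop()
#   oprot.writeStructEnd()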
oprot.writeStructEnd() @@ -34830,6 +36144,8 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -34843,11 +36159,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_open_txns_args: +class check_lock_args: + """ + Attributes: + - rqst + """ thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (CheckLockRequest, CheckLockRequest.thrift_spec), None, ), # 1 ) + def __init__(self, rqst=None,): + self.rqst = rqst + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -34857,6 +36182,12 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = CheckLockRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34866,7 +36197,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_open_txns_args') + oprot.writeStructBegin('check_lock_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34876,6 +36211,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -34889,18 +36225,27 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_open_txns_result: +class check_lock_result: """ Attributes: - success + - o1 + - o2 + - o3 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GetOpenTxnsResponse, GetOpenTxnsResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (LockResponse, LockResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (NoSuchLockException, NoSuchLockException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None,): + def __init__(self, success=None, o1=None, o2=None, o3=None,): self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -34913,10 +36258,28 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = GetOpenTxnsResponse() + self.success = LockResponse() self.success.read(iprot) else: iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnAbortedException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = NoSuchLockException() + self.o3.read(iprot) + else: + iprot.skip(ftype) 
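# [editor's note] The trailing else in each generated read() routes any
# unrecognized field id to iprot.skip(ftype); this is Thrift's standard
# forward-compatibility pattern, so an older client can decode a response from
# a newer server that has added fields instead of failing on them.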
else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34926,11 +36289,23 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_open_txns_result') + oprot.writeStructBegin('check_lock_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34941,6 +36316,9 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -34954,11 +36332,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_open_txns_info_args: +class unlock_args: + """ + Attributes: + - rqst + """ thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (UnlockRequest, UnlockRequest.thrift_spec), None, ), # 1 ) + def __init__(self, rqst=None,): + self.rqst = rqst + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -34968,6 +36355,12 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = UnlockRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34977,7 +36370,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_open_txns_info_args') + oprot.writeStructBegin('unlock_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34987,6 +36384,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -35000,18 +36398,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_open_txns_info_result: +class unlock_result: """ Attributes: - - success + - o1 + - o2 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GetOpenTxnsInfoResponse, GetOpenTxnsInfoResponse.thrift_spec), None, ), # 0 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchLockException, NoSuchLockException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (TxnOpenException, TxnOpenException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None,): - self.success = success + def __init__(self, o1=None, o2=None,): + self.o1 = o1 + self.o2 = o2 def read(self, iprot): if iprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35022,10 +36424,16 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: + if fid == 1: if ftype == TType.STRUCT: - self.success = GetOpenTxnsInfoResponse() - self.success.read(iprot) + self.o1 = NoSuchLockException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnOpenException() + self.o2.read(iprot) else: iprot.skip(ftype) else: @@ -35037,10 +36445,14 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_open_txns_info_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeStructBegin('unlock_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35051,7 +36463,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -35065,7 +36478,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class open_txns_args: +class show_locks_args: """ Attributes: - rqst @@ -35073,7 +36486,7 @@ class open_txns_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (OpenTxnRequest, OpenTxnRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (ShowLocksRequest, ShowLocksRequest.thrift_spec), None, ), # 1 ) def __init__(self, rqst=None,): @@ -35090,7 +36503,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = OpenTxnRequest() + self.rqst = ShowLocksRequest() self.rqst.read(iprot) else: iprot.skip(ftype) @@ -35103,7 +36516,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('open_txns_args') + oprot.writeStructBegin('show_locks_args') if self.rqst is not None: oprot.writeFieldBegin('rqst', TType.STRUCT, 1) self.rqst.write(oprot) @@ -35131,14 +36544,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class open_txns_result: +class show_locks_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (OpenTxnsResponse, OpenTxnsResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (ShowLocksResponse, ShowLocksResponse.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -35155,7 +36568,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = OpenTxnsResponse() + self.success = ShowLocksResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -35168,7 +36581,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: 
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('open_txns_result') + oprot.writeStructBegin('show_locks_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -35196,19 +36609,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class abort_txn_args: +class heartbeat_args: """ Attributes: - - rqst + - ids """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (AbortTxnRequest, AbortTxnRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'ids', (HeartbeatRequest, HeartbeatRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst + def __init__(self, ids=None,): + self.ids = ids def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35221,8 +36634,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = AbortTxnRequest() - self.rqst.read(iprot) + self.ids = HeartbeatRequest() + self.ids.read(iprot) else: iprot.skip(ftype) else: @@ -35234,10 +36647,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('abort_txn_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) + oprot.writeStructBegin('heartbeat_args') + if self.ids is not None: + oprot.writeFieldBegin('ids', TType.STRUCT, 1) + self.ids.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35248,7 +36661,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.ids) return value def __repr__(self): @@ -35262,19 +36675,25 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class abort_txn_result: +class heartbeat_result: """ Attributes: - o1 + - o2 + - o3 """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'o1', (NoSuchLockException, NoSuchLockException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 3 ) - def __init__(self, o1=None,): + def __init__(self, o1=None, o2=None, o3=None,): self.o1 = o1 + self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35287,10 +36706,22 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.o1 = NoSuchTxnException() + self.o1 = NoSuchLockException() self.o1.read(iprot) else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchTxnException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = TxnAbortedException() + self.o3.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -35300,11 +36731,19 @@ def write(self, oprot): if oprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('abort_txn_result') + oprot.writeStructBegin('heartbeat_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35315,6 +36754,8 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -35328,19 +36769,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class abort_txns_args: +class heartbeat_txn_range_args: """ Attributes: - - rqst + - txns """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (AbortTxnsRequest, AbortTxnsRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'txns', (HeartbeatTxnRangeRequest, HeartbeatTxnRangeRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst + def __init__(self, txns=None,): + self.txns = txns def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35353,8 +36794,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = AbortTxnsRequest() - self.rqst.read(iprot) + self.txns = HeartbeatTxnRangeRequest() + self.txns.read(iprot) else: iprot.skip(ftype) else: @@ -35366,10 +36807,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('abort_txns_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) + oprot.writeStructBegin('heartbeat_txn_range_args') + if self.txns is not None: + oprot.writeFieldBegin('txns', TType.STRUCT, 1) + self.txns.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35380,7 +36821,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.txns) return value def __repr__(self): @@ -35394,19 +36835,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class abort_txns_result: +class heartbeat_txn_range_result: """ Attributes: - - o1 + - success """ thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + (0, TType.STRUCT, 'success', (HeartbeatTxnRangeResponse, HeartbeatTxnRangeResponse.thrift_spec), None, ), # 0 ) - def __init__(self, o1=None,): - self.o1 = o1 + def __init__(self, success=None,): + self.success = success def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35417,10 +36857,10 @@ def read(self, iprot): (fname, ftype, fid) = 
iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: + if fid == 0: if ftype == TType.STRUCT: - self.o1 = NoSuchTxnException() - self.o1.read(iprot) + self.success = HeartbeatTxnRangeResponse() + self.success.read(iprot) else: iprot.skip(ftype) else: @@ -35432,10 +36872,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('abort_txns_result') - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) + oprot.writeStructBegin('heartbeat_txn_range_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35446,7 +36886,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.success) return value def __repr__(self): @@ -35460,7 +36900,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class commit_txn_args: +class compact_args: """ Attributes: - rqst @@ -35468,7 +36908,7 @@ class commit_txn_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (CommitTxnRequest, CommitTxnRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (CompactionRequest, CompactionRequest.thrift_spec), None, ), # 1 ) def __init__(self, rqst=None,): @@ -35485,7 +36925,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = CommitTxnRequest() + self.rqst = CompactionRequest() self.rqst.read(iprot) else: iprot.skip(ftype) @@ -35498,7 +36938,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('commit_txn_args') + oprot.writeStructBegin('compact_args') if self.rqst is not None: oprot.writeFieldBegin('rqst', TType.STRUCT, 1) self.rqst.write(oprot) @@ -35526,23 +36966,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class commit_txn_result: - """ - Attributes: - - o1 - - o2 - """ +class compact_result: thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 ) - def __init__(self, o1=None, o2=None,): - self.o1 = o1 - self.o2 = o2 - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -35552,18 +36980,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.o1 = NoSuchTxnException() - self.o1.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRUCT: - self.o2 = TxnAbortedException() - self.o2.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -35573,15 +36989,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not 
None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('commit_txn_result') - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 2) - self.o2.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('compact_result') oprot.writeFieldStop() oprot.writeStructEnd() @@ -35591,8 +36999,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.o1) - value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -35606,7 +37012,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class lock_args: +class compact2_args: """ Attributes: - rqst @@ -35614,7 +37020,7 @@ class lock_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (LockRequest, LockRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (CompactionRequest, CompactionRequest.thrift_spec), None, ), # 1 ) def __init__(self, rqst=None,): @@ -35631,7 +37037,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = LockRequest() + self.rqst = CompactionRequest() self.rqst.read(iprot) else: iprot.skip(ftype) @@ -35644,7 +37050,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('lock_args') + oprot.writeStructBegin('compact2_args') if self.rqst is not None: oprot.writeFieldBegin('rqst', TType.STRUCT, 1) self.rqst.write(oprot) @@ -35672,24 +37078,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class lock_result: +class compact2_result: """ Attributes: - success - - o1 - - o2 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (LockResponse, LockResponse.thrift_spec), None, ), # 0 - (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 + (0, TType.STRUCT, 'success', (CompactionResponse, CompactionResponse.thrift_spec), None, ), # 0 ) - def __init__(self, success=None, o1=None, o2=None,): + def __init__(self, success=None,): self.success = success - self.o1 = o1 - self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35702,22 +37102,10 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = LockResponse() + self.success = CompactionResponse() self.success.read(iprot) else: iprot.skip(ftype) - elif fid == 1: - if ftype == TType.STRUCT: - self.o1 = NoSuchTxnException() - self.o1.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRUCT: - self.o2 = TxnAbortedException() - self.o2.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -35727,19 +37115,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('lock_result') + 
oprot.writeStructBegin('compact2_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 2) - self.o2.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35750,8 +37130,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) - value = (value * 31) ^ hash(self.o1) - value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -35765,7 +37143,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class check_lock_args: +class show_compact_args: """ Attributes: - rqst @@ -35773,7 +37151,7 @@ class check_lock_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (CheckLockRequest, CheckLockRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (ShowCompactRequest, ShowCompactRequest.thrift_spec), None, ), # 1 ) def __init__(self, rqst=None,): @@ -35790,7 +37168,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = CheckLockRequest() + self.rqst = ShowCompactRequest() self.rqst.read(iprot) else: iprot.skip(ftype) @@ -35803,7 +37181,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('check_lock_args') + oprot.writeStructBegin('show_compact_args') if self.rqst is not None: oprot.writeFieldBegin('rqst', TType.STRUCT, 1) self.rqst.write(oprot) @@ -35831,27 +37209,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class check_lock_result: +class show_compact_result: """ Attributes: - success - - o1 - - o2 - - o3 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (LockResponse, LockResponse.thrift_spec), None, ), # 0 - (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 - (3, TType.STRUCT, 'o3', (NoSuchLockException, NoSuchLockException.thrift_spec), None, ), # 3 + (0, TType.STRUCT, 'success', (ShowCompactResponse, ShowCompactResponse.thrift_spec), None, ), # 0 ) - def __init__(self, success=None, o1=None, o2=None, o3=None,): + def __init__(self, success=None,): self.success = success - self.o1 = o1 - self.o2 = o2 - self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35864,28 +37233,10 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = LockResponse() + self.success = ShowCompactResponse() self.success.read(iprot) else: iprot.skip(ftype) - elif fid == 1: - if ftype == TType.STRUCT: - self.o1 = NoSuchTxnException() - self.o1.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRUCT: - self.o2 = TxnAbortedException() - self.o2.read(iprot) - else: - iprot.skip(ftype) - elif fid == 3: - if ftype == TType.STRUCT: - self.o3 = NoSuchLockException() - self.o3.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -35895,23 +37246,11 @@ 
def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('check_lock_result') + oprot.writeStructBegin('show_compact_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 2) - self.o2.write(oprot) - oprot.writeFieldEnd() - if self.o3 is not None: - oprot.writeFieldBegin('o3', TType.STRUCT, 3) - self.o3.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35922,9 +37261,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) - value = (value * 31) ^ hash(self.o1) - value = (value * 31) ^ hash(self.o2) - value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -35938,7 +37274,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class unlock_args: +class add_dynamic_partitions_args: """ Attributes: - rqst @@ -35946,7 +37282,7 @@ class unlock_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (UnlockRequest, UnlockRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (AddDynamicPartitions, AddDynamicPartitions.thrift_spec), None, ), # 1 ) def __init__(self, rqst=None,): @@ -35963,7 +37299,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = UnlockRequest() + self.rqst = AddDynamicPartitions() self.rqst.read(iprot) else: iprot.skip(ftype) @@ -35976,7 +37312,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('unlock_args') + oprot.writeStructBegin('add_dynamic_partitions_args') if self.rqst is not None: oprot.writeFieldBegin('rqst', TType.STRUCT, 1) self.rqst.write(oprot) @@ -36004,7 +37340,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class unlock_result: +class add_dynamic_partitions_result: """ Attributes: - o1 @@ -36013,8 +37349,8 @@ class unlock_result: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'o1', (NoSuchLockException, NoSuchLockException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (TxnOpenException, TxnOpenException.thrift_spec), None, ), # 2 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 ) def __init__(self, o1=None, o2=None,): @@ -36032,13 +37368,13 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.o1 = NoSuchLockException() + self.o1 = NoSuchTxnException() self.o1.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = TxnOpenException() + self.o2 = TxnAbortedException() self.o2.read(iprot) else: iprot.skip(ftype) @@ -36051,7 +37387,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) 
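# [editor's note] The branch above is the C-accelerated fast path: when the
# protocol is TBinaryProtocolAccelerated and the fastbinary extension is
# available, the whole struct is serialized by a single encode_binary() call
# and the pure-Python per-field loop that follows is skipped.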
return - oprot.writeStructBegin('unlock_result') + oprot.writeStructBegin('add_dynamic_partitions_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -36084,7 +37420,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class show_locks_args: +class get_next_notification_args: """ Attributes: - rqst @@ -36092,7 +37428,7 @@ class show_locks_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (ShowLocksRequest, ShowLocksRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (NotificationEventRequest, NotificationEventRequest.thrift_spec), None, ), # 1 ) def __init__(self, rqst=None,): @@ -36109,7 +37445,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = ShowLocksRequest() + self.rqst = NotificationEventRequest() self.rqst.read(iprot) else: iprot.skip(ftype) @@ -36122,7 +37458,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('show_locks_args') + oprot.writeStructBegin('get_next_notification_args') if self.rqst is not None: oprot.writeFieldBegin('rqst', TType.STRUCT, 1) self.rqst.write(oprot) @@ -36150,14 +37486,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class show_locks_result: +class get_next_notification_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (ShowLocksResponse, ShowLocksResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (NotificationEventResponse, NotificationEventResponse.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -36174,7 +37510,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = ShowLocksResponse() + self.success = NotificationEventResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -36187,7 +37523,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('show_locks_result') + oprot.writeStructBegin('get_next_notification_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -36215,20 +37551,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class heartbeat_args: - """ - Attributes: - - ids - """ +class get_current_notificationEventId_args: thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'ids', (HeartbeatRequest, HeartbeatRequest.thrift_spec), None, ), # 1 ) - def __init__(self, ids=None,): - self.ids = ids - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -36238,12 +37565,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.ids = HeartbeatRequest() - self.ids.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -36253,11 +37574,7 @@ def write(self, oprot): if oprot.__class__ == 
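
The write() side mirrors read(): writeStructBegin, then for each field that is not None a writeFieldBegin/payload/writeFieldEnd triple, then writeFieldStop and writeStructEnd — so unset optional fields, such as an exception slot that was never thrown, simply never reach the wire. A runnable sketch with a recording protocol (RecordingProtocol is a stand-in, not thrift API):

    # Records the framing calls a generated write() makes; runs without thrift.
    class RecordingProtocol(object):
        def __init__(self):
            self.ops = []
        def __getattr__(self, name):
            # capture writeStructBegin / writeFieldBegin / ... as (name, args) tuples
            def op(*args):
                self.ops.append((name,) + args)
            return op

    TSTRUCT = 12

    def write_show_compact_args(oprot, rqst):
        oprot.writeStructBegin('show_compact_args')
        if rqst is not None:                              # unset fields are omitted entirely
            oprot.writeFieldBegin('rqst', TSTRUCT, 1)
            oprot.writeStructBegin('ShowCompactRequest')  # stands in for rqst.write(oprot)
            oprot.writeStructEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()                            # STOP terminates the field list
        oprot.writeStructEnd()

    p = RecordingProtocol()
    write_show_compact_args(p, rqst=object())
    for op in p.ops:
        print(op)
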
TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('heartbeat_args') - if self.ids is not None: - oprot.writeFieldBegin('ids', TType.STRUCT, 1) - self.ids.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('get_current_notificationEventId_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -36267,7 +37584,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.ids) return value def __repr__(self): @@ -36281,25 +37597,18 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class heartbeat_result: +class get_current_notificationEventId_result: """ Attributes: - - o1 - - o2 - - o3 + - success """ thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'o1', (NoSuchLockException, NoSuchLockException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 2 - (3, TType.STRUCT, 'o3', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 3 + (0, TType.STRUCT, 'success', (CurrentNotificationEventId, CurrentNotificationEventId.thrift_spec), None, ), # 0 ) - def __init__(self, o1=None, o2=None, o3=None,): - self.o1 = o1 - self.o2 = o2 - self.o3 = o3 + def __init__(self, success=None,): + self.success = success def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -36310,22 +37619,10 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.o1 = NoSuchLockException() - self.o1.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRUCT: - self.o2 = NoSuchTxnException() - self.o2.read(iprot) - else: - iprot.skip(ftype) - elif fid == 3: + if fid == 0: if ftype == TType.STRUCT: - self.o3 = TxnAbortedException() - self.o3.read(iprot) + self.success = CurrentNotificationEventId() + self.success.read(iprot) else: iprot.skip(ftype) else: @@ -36337,18 +37634,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('heartbeat_result') - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 2) - self.o2.write(oprot) - oprot.writeFieldEnd() - if self.o3 is not None: - oprot.writeFieldBegin('o3', TType.STRUCT, 3) - self.o3.write(oprot) + oprot.writeStructBegin('get_current_notificationEventId_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -36359,9 +37648,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.o1) - value = (value * 31) ^ hash(self.o2) - value = (value * 31) ^ hash(self.o3) + value = (value * 31) ^ hash(self.success) return value def __repr__(self): @@ -36375,19 +37662,15 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class heartbeat_txn_range_args: +class 
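
Zero-argument methods like get_current_notificationEventId still get an args struct; with an empty thrift_spec its write() emits only the field-stop marker, and under TBinaryProtocol — where struct begin/end contribute no bytes of their own — the whole struct is a single STOP byte. A hand-rolled illustration of that framing (no thrift needed):

    import struct

    def write_empty_args_struct():
        # writeStructBegin/writeStructEnd emit nothing in TBinaryProtocol;
        # writeFieldStop is a single TType.STOP byte terminating the field list.
        return struct.pack('!b', 0)

    print(repr(write_empty_args_struct()))   # b'\x00'
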
get_notification_events_count_args: """ Attributes: - - txns + - rqst """ - thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'txns', (HeartbeatTxnRangeRequest, HeartbeatTxnRangeRequest.thrift_spec), None, ), # 1 - ) - - def __init__(self, txns=None,): - self.txns = txns + thrift_spec = None + def __init__(self, rqst=None,): + self.rqst = rqst def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -36398,10 +37681,10 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: + if fid == -1: if ftype == TType.STRUCT: - self.txns = HeartbeatTxnRangeRequest() - self.txns.read(iprot) + self.rqst = NotificationEventsCountRequest() + self.rqst.read(iprot) else: iprot.skip(ftype) else: @@ -36413,10 +37696,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('heartbeat_txn_range_args') - if self.txns is not None: - oprot.writeFieldBegin('txns', TType.STRUCT, 1) - self.txns.write(oprot) + oprot.writeStructBegin('get_notification_events_count_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, -1) + self.rqst.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -36427,7 +37710,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.txns) + value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -36441,14 +37724,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class heartbeat_txn_range_result: +class get_notification_events_count_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (HeartbeatTxnRangeResponse, HeartbeatTxnRangeResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (NotificationEventsCountResponse, NotificationEventsCountResponse.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -36465,7 +37748,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = HeartbeatTxnRangeResponse() + self.success = NotificationEventsCountResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -36478,7 +37761,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('heartbeat_txn_range_result') + oprot.writeStructBegin('get_notification_events_count_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -36506,119 +37789,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class compact_args: - """ - Attributes: - - rqst - """ - - thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'rqst', (CompactionRequest, CompactionRequest.thrift_spec), None, ), # 1 - ) - - def __init__(self, rqst=None,): - self.rqst = rqst - - def read(self, iprot): - if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: - 
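
One quirk worth flagging: get_notification_events_count_args carries thrift_spec = None and field id -1 on both sides of this regeneration. That is what the Python generator emits when, as appears to be the case here, the IDL declares the parameter without an explicit field id — implicit ids are assigned counting down from -1, and the generator then declines to build the positional spec table, which in turn disables the fastbinary fast path, because every accelerated branch in this file is guarded on the spec. A runnable illustration of that guard:

    # The guard every generated read/write uses before taking the C fast path.
    fastbinary = None      # pretend the accelerator module failed to import
    thrift_spec = None     # what an implicit (negative) field id produces

    print(thrift_spec is not None and fastbinary is not None)  # False: pure-Python loop

    thrift_spec = (None, (1, 12, 'rqst', None, None))          # explicit field id 1
    fastbinary = object()                                      # pretend import succeeded
    print(thrift_spec is not None and fastbinary is not None)  # True: fast path eligible
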
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) - return - iprot.readStructBegin() - while True: - (fname, ftype, fid) = iprot.readFieldBegin() - if ftype == TType.STOP: - break - if fid == 1: - if ftype == TType.STRUCT: - self.rqst = CompactionRequest() - self.rqst.read(iprot) - else: - iprot.skip(ftype) - else: - iprot.skip(ftype) - iprot.readFieldEnd() - iprot.readStructEnd() - - def write(self, oprot): - if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: - oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) - return - oprot.writeStructBegin('compact_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) - oprot.writeFieldEnd() - oprot.writeFieldStop() - oprot.writeStructEnd() - - def validate(self): - return - - - def __hash__(self): - value = 17 - value = (value * 31) ^ hash(self.rqst) - return value - - def __repr__(self): - L = ['%s=%r' % (key, value) - for key, value in self.__dict__.iteritems()] - return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) - - def __eq__(self, other): - return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ - - def __ne__(self, other): - return not (self == other) - -class compact_result: - - thrift_spec = ( - ) - - def read(self, iprot): - if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: - fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) - return - iprot.readStructBegin() - while True: - (fname, ftype, fid) = iprot.readFieldBegin() - if ftype == TType.STOP: - break - else: - iprot.skip(ftype) - iprot.readFieldEnd() - iprot.readStructEnd() - - def write(self, oprot): - if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: - oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) - return - oprot.writeStructBegin('compact_result') - oprot.writeFieldStop() - oprot.writeStructEnd() - - def validate(self): - return - - - def __hash__(self): - value = 17 - return value - - def __repr__(self): - L = ['%s=%r' % (key, value) - for key, value in self.__dict__.iteritems()] - return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) - - def __eq__(self, other): - return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ - - def __ne__(self, other): - return not (self == other) - -class compact2_args: +class fire_listener_event_args: """ Attributes: - rqst @@ -36626,7 +37797,7 @@ class compact2_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (CompactionRequest, CompactionRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (FireEventRequest, FireEventRequest.thrift_spec), None, ), # 1 ) def __init__(self, rqst=None,): @@ -36643,7 +37814,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = CompactionRequest() + self.rqst = FireEventRequest() self.rqst.read(iprot) else: iprot.skip(ftype) @@ -36656,7 +37827,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('compact2_args') + 
oprot.writeStructBegin('fire_listener_event_args') if self.rqst is not None: oprot.writeFieldBegin('rqst', TType.STRUCT, 1) self.rqst.write(oprot) @@ -36684,14 +37855,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class compact2_result: +class fire_listener_event_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (CompactionResponse, CompactionResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (FireEventResponse, FireEventResponse.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -36708,7 +37879,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = CompactionResponse() + self.success = FireEventResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -36721,7 +37892,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('compact2_result') + oprot.writeStructBegin('fire_listener_event_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -36749,20 +37920,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class show_compact_args: - """ - Attributes: - - rqst - """ +class flushCache_args: thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'rqst', (ShowCompactRequest, ShowCompactRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -36772,12 +37934,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.rqst = ShowCompactRequest() - self.rqst.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -36787,11 +37943,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('show_compact_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('flushCache_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -36801,7 +37953,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -36815,19 +37966,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class show_compact_result: - """ - Attributes: - - success - """ +class flushCache_result: thrift_spec = ( - (0, TType.STRUCT, 'success', (ShowCompactResponse, ShowCompactResponse.thrift_spec), None, ), # 0 ) - def __init__(self, success=None,): - self.success = success - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, 
self.thrift_spec)) @@ -36837,12 +37980,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.STRUCT: - self.success = ShowCompactResponse() - self.success.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -36852,11 +37989,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('show_compact_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('flushCache_result') oprot.writeFieldStop() oprot.writeStructEnd() @@ -36866,7 +37999,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) return value def __repr__(self): @@ -36880,19 +38012,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class add_dynamic_partitions_args: +class cm_recycle_args: """ Attributes: - - rqst + - request """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (AddDynamicPartitions, AddDynamicPartitions.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'request', (CmRecycleRequest, CmRecycleRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst + def __init__(self, request=None,): + self.request = request def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -36905,8 +38037,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = AddDynamicPartitions() - self.rqst.read(iprot) + self.request = CmRecycleRequest() + self.request.read(iprot) else: iprot.skip(ftype) else: @@ -36918,10 +38050,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('add_dynamic_partitions_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) + oprot.writeStructBegin('cm_recycle_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -36932,7 +38064,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.request) return value def __repr__(self): @@ -36946,22 +38078,21 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class add_dynamic_partitions_result: +class cm_recycle_result: """ Attributes: + - success - o1 - - o2 """ thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 + (0, TType.STRUCT, 'success', (CmRecycleResponse, CmRecycleResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) - def __init__(self, o1=None, o2=None,): + def __init__(self, success=None, o1=None,): + 
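
All of these structs get value semantics: __eq__ compares __dict__, __ne__ delegates to it, and __hash__ folds the per-field hashes through the 17/31 accumulator that the hunks above keep growing and shrinking field by field. Extracted as plain, runnable Python:

    # The generated 17/31 hash accumulator, as a standalone function.
    def thrift_struct_hash(*field_values):
        value = 17
        for v in field_values:
            value = (value * 31) ^ hash(v)
        return value

    print(thrift_struct_hash('plan1'))        # one STRING field, e.g. a name
    print(thrift_struct_hash(None, None))     # unset exception fields hash as None
    # Caution: these structs are mutable, so hashing is only safe while they
    # are not mutated under a dict or set.
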
self.success = success self.o1 = o1 - self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -36972,16 +38103,16 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: + if fid == 0: if ftype == TType.STRUCT: - self.o1 = NoSuchTxnException() - self.o1.read(iprot) + self.success = CmRecycleResponse() + self.success.read(iprot) else: iprot.skip(ftype) - elif fid == 2: + elif fid == 1: if ftype == TType.STRUCT: - self.o2 = TxnAbortedException() - self.o2.read(iprot) + self.o1 = MetaException() + self.o1.read(iprot) else: iprot.skip(ftype) else: @@ -36993,15 +38124,15 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('add_dynamic_partitions_result') + oprot.writeStructBegin('cm_recycle_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 2) - self.o2.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37011,8 +38142,8 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) - value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -37026,19 +38157,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_next_notification_args: +class get_file_metadata_by_expr_args: """ Attributes: - - rqst + - req """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (NotificationEventRequest, NotificationEventRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'req', (GetFileMetadataByExprRequest, GetFileMetadataByExprRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst + def __init__(self, req=None,): + self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37051,8 +38182,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = NotificationEventRequest() - self.rqst.read(iprot) + self.req = GetFileMetadataByExprRequest() + self.req.read(iprot) else: iprot.skip(ftype) else: @@ -37064,10 +38195,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_next_notification_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) + oprot.writeStructBegin('get_file_metadata_by_expr_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37078,7 +38209,7 @@ def validate(self): def __hash__(self): value = 17 - value 
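
How these result structs are consumed deserves a note: for cm_recycle_result and friends, exactly one field is expected to be set — success for a normal return, or one of the oN exception fields. The generated client recv_* methods (defined elsewhere in this module) raise a set exception field and otherwise return success. A hedged, generic sketch of that convention — the real methods are per-RPC, and TApplicationError here is a stand-in for thrift's TApplicationException:

    class TApplicationError(Exception):
        pass

    def recv_generic(result, is_void):
        # exception fields win over success; a set oN becomes a raised exception
        for exc in (getattr(result, 'o1', None), getattr(result, 'o2', None),
                    getattr(result, 'o3', None)):
            if exc is not None:
                raise exc
        if is_void:
            return None                       # e.g. the removed compact, or flushCache
        if getattr(result, 'success', None) is not None:
            return result.success
        raise TApplicationError('missing result')

    class FakeResult(object):
        def __init__(self, **kw):
            self.__dict__.update(kw)

    print(recv_generic(FakeResult(success='uuid-123'), is_void=False))
    try:
        recv_generic(FakeResult(o1=ValueError('NoSuchObjectException')), is_void=False)
    except ValueError as e:
        print('raised: %s' % e)
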
= (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -37092,14 +38223,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_next_notification_result: +class get_file_metadata_by_expr_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (NotificationEventResponse, NotificationEventResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (GetFileMetadataByExprResult, GetFileMetadataByExprResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -37116,7 +38247,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = NotificationEventResponse() + self.success = GetFileMetadataByExprResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -37129,7 +38260,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_next_notification_result') + oprot.writeStructBegin('get_file_metadata_by_expr_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -37157,11 +38288,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_current_notificationEventId_args: +class get_file_metadata_args: + """ + Attributes: + - req + """ thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (GetFileMetadataRequest, GetFileMetadataRequest.thrift_spec), None, ), # 1 ) + def __init__(self, req=None,): + self.req = req + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -37171,6 +38311,12 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetFileMetadataRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -37180,7 +38326,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_current_notificationEventId_args') + oprot.writeStructBegin('get_file_metadata_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37190,6 +38340,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -37203,14 +38354,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_current_notificationEventId_result: +class get_file_metadata_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (CurrentNotificationEventId, CurrentNotificationEventId.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (GetFileMetadataResult, GetFileMetadataResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -37227,7 +38378,7 @@ def read(self, iprot): break if fid == 
0: if ftype == TType.STRUCT: - self.success = CurrentNotificationEventId() + self.success = GetFileMetadataResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -37240,7 +38391,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_current_notificationEventId_result') + oprot.writeStructBegin('get_file_metadata_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -37268,15 +38419,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_notification_events_count_args: +class put_file_metadata_args: """ Attributes: - - rqst + - req """ - thrift_spec = None - def __init__(self, rqst=None,): - self.rqst = rqst + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (PutFileMetadataRequest, PutFileMetadataRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, req=None,): + self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37287,10 +38442,10 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == -1: + if fid == 1: if ftype == TType.STRUCT: - self.rqst = NotificationEventsCountRequest() - self.rqst.read(iprot) + self.req = PutFileMetadataRequest() + self.req.read(iprot) else: iprot.skip(ftype) else: @@ -37302,10 +38457,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_notification_events_count_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, -1) - self.rqst.write(oprot) + oprot.writeStructBegin('put_file_metadata_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37316,7 +38471,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -37330,14 +38485,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_notification_events_count_result: +class put_file_metadata_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (NotificationEventsCountResponse, NotificationEventsCountResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (PutFileMetadataResult, PutFileMetadataResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -37354,7 +38509,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = NotificationEventsCountResponse() + self.success = PutFileMetadataResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -37367,7 +38522,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - 
oprot.writeStructBegin('get_notification_events_count_result') + oprot.writeStructBegin('put_file_metadata_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -37395,19 +38550,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class fire_listener_event_args: +class clear_file_metadata_args: """ Attributes: - - rqst + - req """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (FireEventRequest, FireEventRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'req', (ClearFileMetadataRequest, ClearFileMetadataRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst + def __init__(self, req=None,): + self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37420,8 +38575,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = FireEventRequest() - self.rqst.read(iprot) + self.req = ClearFileMetadataRequest() + self.req.read(iprot) else: iprot.skip(ftype) else: @@ -37433,10 +38588,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('fire_listener_event_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) + oprot.writeStructBegin('clear_file_metadata_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37447,7 +38602,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -37461,14 +38616,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class fire_listener_event_result: +class clear_file_metadata_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (FireEventResponse, FireEventResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (ClearFileMetadataResult, ClearFileMetadataResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -37485,7 +38640,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = FireEventResponse() + self.success = ClearFileMetadataResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -37498,7 +38653,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('fire_listener_event_result') + oprot.writeStructBegin('clear_file_metadata_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -37526,11 +38681,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class flushCache_args: +class cache_file_metadata_args: + """ + Attributes: + - req + """ thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (CacheFileMetadataRequest, CacheFileMetadataRequest.thrift_spec), None, ), # 1 ) + def __init__(self, 
req=None,): + self.req = req + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -37540,6 +38704,12 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRUCT: + self.req = CacheFileMetadataRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -37549,7 +38719,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('flushCache_args') + oprot.writeStructBegin('cache_file_metadata_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37559,6 +38733,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -37572,11 +38747,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class flushCache_result: +class cache_file_metadata_result: + """ + Attributes: + - success + """ thrift_spec = ( + (0, TType.STRUCT, 'success', (CacheFileMetadataResult, CacheFileMetadataResult.thrift_spec), None, ), # 0 ) + def __init__(self, success=None,): + self.success = success + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -37586,6 +38769,12 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 0: + if ftype == TType.STRUCT: + self.success = CacheFileMetadataResult() + self.success.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -37595,7 +38784,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('flushCache_result') + oprot.writeStructBegin('cache_file_metadata_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37605,6 +38798,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.success) return value def __repr__(self): @@ -37618,20 +38812,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class cm_recycle_args: - """ - Attributes: - - request - """ +class get_metastore_db_uuid_args: thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'request', (CmRecycleRequest, CmRecycleRequest.thrift_spec), None, ), # 1 ) - def __init__(self, request=None,): - self.request = request - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is 
not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -37641,12 +38826,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.request = CmRecycleRequest() - self.request.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -37656,11 +38835,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('cm_recycle_args') - if self.request is not None: - oprot.writeFieldBegin('request', TType.STRUCT, 1) - self.request.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('get_metastore_db_uuid_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -37670,7 +38845,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.request) return value def __repr__(self): @@ -37684,7 +38858,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class cm_recycle_result: +class get_metastore_db_uuid_result: """ Attributes: - success @@ -37692,7 +38866,7 @@ class cm_recycle_result: """ thrift_spec = ( - (0, TType.STRUCT, 'success', (CmRecycleResponse, CmRecycleResponse.thrift_spec), None, ), # 0 + (0, TType.STRING, 'success', None, None, ), # 0 (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) @@ -37710,9 +38884,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: - if ftype == TType.STRUCT: - self.success = CmRecycleResponse() - self.success.read(iprot) + if ftype == TType.STRING: + self.success = iprot.readString() else: iprot.skip(ftype) elif fid == 1: @@ -37730,10 +38903,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('cm_recycle_result') + oprot.writeStructBegin('get_metastore_db_uuid_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeFieldBegin('success', TType.STRING, 0) + oprot.writeString(self.success) oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -37763,19 +38936,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_file_metadata_by_expr_args: +class create_resource_plan_args: """ Attributes: - - req + - resourcePlan """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'req', (GetFileMetadataByExprRequest, GetFileMetadataByExprRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'resourcePlan', (WMResourcePlan, WMResourcePlan.thrift_spec), None, ), # 1 ) - def __init__(self, req=None,): - self.req = req + def __init__(self, resourcePlan=None,): + self.resourcePlan = resourcePlan def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37788,8 +38961,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.req = GetFileMetadataByExprRequest() - self.req.read(iprot) + self.resourcePlan = WMResourcePlan() + 
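
get_metastore_db_uuid_result is the one result in these hunks whose success is a base type rather than a struct, so the read path calls iprot.readString() inline instead of constructing a value object and delegating to its read() — and its spec entry carries None where struct results carry a (class, spec) pair. A minimal stand-in to make the difference concrete:

    # A struct-typed success constructs the type and calls its read();
    # a base-typed success like this UUID string is read inline.
    class FakeProtocol(object):
        def readString(self):
            return 'b1c2d3-metastore-uuid'   # pretend these bytes came off the wire

    iprot = FakeProtocol()
    success = iprot.readString()             # what the fid == 0 branch now does
    print(success)
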
self.resourcePlan.read(iprot) else: iprot.skip(ftype) else: @@ -37801,10 +38974,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_file_metadata_by_expr_args') - if self.req is not None: - oprot.writeFieldBegin('req', TType.STRUCT, 1) - self.req.write(oprot) + oprot.writeStructBegin('create_resource_plan_args') + if self.resourcePlan is not None: + oprot.writeFieldBegin('resourcePlan', TType.STRUCT, 1) + self.resourcePlan.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37815,7 +38988,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.req) + value = (value * 31) ^ hash(self.resourcePlan) return value def __repr__(self): @@ -37829,18 +39002,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_file_metadata_by_expr_result: +class create_resource_plan_result: """ Attributes: - - success + - o1 + - o2 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GetFileMetadataByExprResult, GetFileMetadataByExprResult.thrift_spec), None, ), # 0 + None, # 0 + (1, TType.STRUCT, 'o1', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None,): - self.success = success + def __init__(self, o1=None, o2=None,): + self.o1 = o1 + self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37851,10 +39028,16 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: + if fid == 1: if ftype == TType.STRUCT: - self.success = GetFileMetadataByExprResult() - self.success.read(iprot) + self.o1 = InvalidObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) else: iprot.skip(ftype) else: @@ -37866,10 +39049,14 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_file_metadata_by_expr_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeStructBegin('create_resource_plan_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37880,7 +39067,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -37894,19 +39082,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_file_metadata_args: +class get_resource_plan_args: """ Attributes: - - req + - name """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 
'req', (GetFileMetadataRequest, GetFileMetadataRequest.thrift_spec), None, ), # 1 + (1, TType.STRING, 'name', None, None, ), # 1 ) - def __init__(self, req=None,): - self.req = req + def __init__(self, name=None,): + self.name = name def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37918,9 +39106,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.req = GetFileMetadataRequest() - self.req.read(iprot) + if ftype == TType.STRING: + self.name = iprot.readString() else: iprot.skip(ftype) else: @@ -37932,10 +39119,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_file_metadata_args') - if self.req is not None: - oprot.writeFieldBegin('req', TType.STRUCT, 1) - self.req.write(oprot) + oprot.writeStructBegin('get_resource_plan_args') + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 1) + oprot.writeString(self.name) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37946,7 +39133,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.req) + value = (value * 31) ^ hash(self.name) return value def __repr__(self): @@ -37960,18 +39147,24 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_file_metadata_result: +class get_resource_plan_result: """ Attributes: - success + - o1 + - o2 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GetFileMetadataResult, GetFileMetadataResult.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (WMResourcePlan, WMResourcePlan.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None,): + def __init__(self, success=None, o1=None, o2=None,): self.success = success + self.o1 = o1 + self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37984,10 +39177,22 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = GetFileMetadataResult() + self.success = WMResourcePlan() self.success.read(iprot) else: iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -37997,11 +39202,19 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_file_metadata_result') + oprot.writeStructBegin('get_resource_plan_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() + if self.o1 is not None: + 
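
Note also the stylistic split these hunks expose: the older RPCs wrap parameters in request structs (req/rqst), while the new resource-plan calls pass a bare name string at field id 1. On the binary protocol a bare STRING argument frames as a field header (type byte, big-endian i16 id), an i32 length plus the bytes, then the STOP byte. A hand-rolled, runnable illustration:

    import struct

    def frame_name_arg(name):
        # e.g. get_resource_plan_args.name under TBinaryProtocol
        TSTRING, TSTOP, FIELD_ID = 11, 0, 1
        out = struct.pack('!bh', TSTRING, FIELD_ID)    # writeFieldBegin
        out += struct.pack('!i', len(name)) + name     # writeString: length + bytes
        out += struct.pack('!b', TSTOP)                # writeFieldStop
        return out

    print(repr(frame_name_arg(b'global_plan')))
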
oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38012,6 +39225,8 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -38025,20 +39240,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class put_file_metadata_args: - """ - Attributes: - - req - """ +class get_all_resource_plans_args: thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'req', (PutFileMetadataRequest, PutFileMetadataRequest.thrift_spec), None, ), # 1 ) - def __init__(self, req=None,): - self.req = req - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -38048,12 +39254,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.req = PutFileMetadataRequest() - self.req.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -38063,11 +39263,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('put_file_metadata_args') - if self.req is not None: - oprot.writeFieldBegin('req', TType.STRUCT, 1) - self.req.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('get_all_resource_plans_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -38077,7 +39273,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -38091,18 +39286,21 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class put_file_metadata_result: +class get_all_resource_plans_result: """ Attributes: - success + - o1 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (PutFileMetadataResult, PutFileMetadataResult.thrift_spec), None, ), # 0 + (0, TType.LIST, 'success', (TType.STRUCT,(WMResourcePlan, WMResourcePlan.thrift_spec)), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None,): + def __init__(self, success=None, o1=None,): self.success = success + self.o1 = o1 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38114,9 +39312,20 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1134, _size1131) = iprot.readListBegin() + for _i1135 in xrange(_size1131): + _elem1136 = WMResourcePlan() + _elem1136.read(iprot) + self.success.append(_elem1136) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: if ftype == TType.STRUCT: - self.success = PutFileMetadataResult() - self.success.read(iprot) + self.o1 = MetaException() + self.o1.read(iprot) else: iprot.skip(ftype) else: 
@@ -38128,10 +39337,17 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('put_file_metadata_result') + oprot.writeStructBegin('get_all_resource_plans_result') if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1137 in self.success: + iter1137.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38143,6 +39359,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -38156,19 +39373,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class clear_file_metadata_args: +class alter_resource_plan_args: """ Attributes: - - req + - name + - resourcePlan """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'req', (ClearFileMetadataRequest, ClearFileMetadataRequest.thrift_spec), None, ), # 1 + (1, TType.STRING, 'name', None, None, ), # 1 + (2, TType.STRUCT, 'resourcePlan', (WMResourcePlan, WMResourcePlan.thrift_spec), None, ), # 2 ) - def __init__(self, req=None,): - self.req = req + def __init__(self, name=None, resourcePlan=None,): + self.name = name + self.resourcePlan = resourcePlan def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38180,9 +39400,14 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: + if ftype == TType.STRING: + self.name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: if ftype == TType.STRUCT: - self.req = ClearFileMetadataRequest() - self.req.read(iprot) + self.resourcePlan = WMResourcePlan() + self.resourcePlan.read(iprot) else: iprot.skip(ftype) else: @@ -38194,10 +39419,14 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('clear_file_metadata_args') - if self.req is not None: - oprot.writeFieldBegin('req', TType.STRUCT, 1) - self.req.write(oprot) + oprot.writeStructBegin('alter_resource_plan_args') + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 1) + oprot.writeString(self.name) + oprot.writeFieldEnd() + if self.resourcePlan is not None: + oprot.writeFieldBegin('resourcePlan', TType.STRUCT, 2) + self.resourcePlan.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38208,7 +39437,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.req) + value = (value * 31) ^ hash(self.name) + value = (value * 31) ^ hash(self.resourcePlan) return value def __repr__(self): @@ -38222,18 +39452,25 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class clear_file_metadata_result: +class alter_resource_plan_result: """ Attributes: - - success + - o1 + - o3 
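
get_all_resource_plans_result is the first LIST-typed success in these hunks: the wire format is an element-type/count header followed by each element's struct encoding, and the _etype1134/_size1131/_i1135 names are nothing deeper than globally unique counters from the generator. A sketch of the write side with a recording protocol stand-in (runnable without thrift):

    TSTRUCT = 12

    class RecordingProtocol(object):
        def __init__(self):
            self.ops = []
        def __getattr__(self, name):
            def op(*args):
                self.ops.append((name,) + args)
            return op

    def write_plan_list(oprot, plans):
        oprot.writeListBegin(TSTRUCT, len(plans))   # element type + count header
        for plan in plans:
            oprot.writeStructBegin(plan)            # stands in for plan.write(oprot)
            oprot.writeStructEnd()
        oprot.writeListEnd()

    p = RecordingProtocol()
    write_plan_list(p, ['planA', 'planB'])
    for op in p.ops:
        print(op)
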
+ - o2 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (ClearFileMetadataResult, ClearFileMetadataResult.thrift_spec), None, ), # 0 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o3', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None,): - self.success = success + def __init__(self, o1=None, o3=None, o2=None,): + self.o1 = o1 + self.o3 = o3 + self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38244,10 +39481,22 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: + if fid == 1: if ftype == TType.STRUCT: - self.success = ClearFileMetadataResult() - self.success.read(iprot) + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o3 = InvalidOperationException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) else: iprot.skip(ftype) else: @@ -38259,10 +39508,18 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('clear_file_metadata_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeStructBegin('alter_resource_plan_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 2) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 3) + self.o2.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38273,7 +39530,9 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o3) + value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -38287,19 +39546,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class cache_file_metadata_args: +class validate_resource_plan_args: """ Attributes: - - req + - name """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'req', (CacheFileMetadataRequest, CacheFileMetadataRequest.thrift_spec), None, ), # 1 + (1, TType.STRING, 'name', None, None, ), # 1 ) - def __init__(self, req=None,): - self.req = req + def __init__(self, name=None,): + self.name = name def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38311,9 +39570,8 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRUCT: - self.req = CacheFileMetadataRequest() - self.req.read(iprot) + if ftype == TType.STRING: + self.name = iprot.readString() else: iprot.skip(ftype) else: @@ -38325,10 
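
A small readability trap in alter_resource_plan_result (and validate_resource_plan_result just after it): the attribute names run o1, o3, o2 while the wire ids run 1, 2, 3 — presumably the names follow the order the IDL's throws clause declares the exceptions, and only the ids matter on the wire. Reproducing the spec as data makes the mapping explicit:

    # Only field ids travel on the wire; the oN names are local attributes.
    spec = (
        None,                                        # no field id 0: a void result
        (1, 12, 'o1', 'NoSuchObjectException'),
        (2, 12, 'o3', 'InvalidOperationException'),  # attribute o3, wire id 2
        (3, 12, 'o2', 'MetaException'),              # attribute o2, wire id 3
    )
    for entry in spec:
        if entry:
            fid, _, name, exc = entry
            print('wire id %d -> attribute %s (%s)' % (fid, name, exc))
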
+39583,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('cache_file_metadata_args') - if self.req is not None: - oprot.writeFieldBegin('req', TType.STRUCT, 1) - self.req.write(oprot) + oprot.writeStructBegin('validate_resource_plan_args') + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 1) + oprot.writeString(self.name) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38339,7 +39597,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.req) + value = (value * 31) ^ hash(self.name) return value def __repr__(self): @@ -38353,18 +39611,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class cache_file_metadata_result: +class validate_resource_plan_result: """ Attributes: - - success + - o1 + - o3 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (CacheFileMetadataResult, CacheFileMetadataResult.thrift_spec), None, ), # 0 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None,): - self.success = success + def __init__(self, o1=None, o3=None,): + self.o1 = o1 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38375,10 +39637,16 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: + if fid == 1: if ftype == TType.STRUCT: - self.success = CacheFileMetadataResult() - self.success.read(iprot) + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) else: iprot.skip(ftype) else: @@ -38390,10 +39658,14 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('cache_file_metadata_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeStructBegin('validate_resource_plan_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 2) + self.o3.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38404,7 +39676,8 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -38418,11 +39691,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_metastore_db_uuid_args: +class drop_resource_plan_args: + """ + Attributes: + - name + """ thrift_spec = ( + None, # 0 + (1, TType.STRING, 'name', None, None, ), # 1 ) + def __init__(self, name=None,): + self.name = name + def read(self, iprot): if 
iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -38432,6 +39714,11 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRING: + self.name = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -38441,7 +39728,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_metastore_db_uuid_args') + oprot.writeStructBegin('drop_resource_plan_args') + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 1) + oprot.writeString(self.name) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38451,6 +39742,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.name) return value def __repr__(self): @@ -38464,21 +39756,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_metastore_db_uuid_result: +class drop_resource_plan_result: """ Attributes: - - success - o1 + - o2 """ thrift_spec = ( - (0, TType.STRING, 'success', None, None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 ) - def __init__(self, success=None, o1=None,): - self.success = success + def __init__(self, o1=None, o2=None,): self.o1 = o1 + self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -38489,15 +39782,16 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.STRING: - self.success = iprot.readString() + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) else: iprot.skip(ftype) - elif fid == 1: + elif fid == 2: if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) + self.o2 = MetaException() + self.o2.read(iprot) else: iprot.skip(ftype) else: @@ -38509,15 +39803,15 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_metastore_db_uuid_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRING, 0) - oprot.writeString(self.success) - oprot.writeFieldEnd() + oprot.writeStructBegin('drop_resource_plan_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -38527,8 +39821,8 @@ def validate(self): def __hash__(self): value = 17 - value = 
(value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) return value def __repr__(self): diff --git standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 7df14f8167..91ca235a03 100644 --- standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -2693,6 +2693,104 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_metastore_db_uuid failed: unknown result') end + def create_resource_plan(resourcePlan) + send_create_resource_plan(resourcePlan) + recv_create_resource_plan() + end + + def send_create_resource_plan(resourcePlan) + send_message('create_resource_plan', Create_resource_plan_args, :resourcePlan => resourcePlan) + end + + def recv_create_resource_plan() + result = receive_message(Create_resource_plan_result) + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + return + end + + def get_resource_plan(name) + send_get_resource_plan(name) + return recv_get_resource_plan() + end + + def send_get_resource_plan(name) + send_message('get_resource_plan', Get_resource_plan_args, :name => name) + end + + def recv_get_resource_plan() + result = receive_message(Get_resource_plan_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_resource_plan failed: unknown result') + end + + def get_all_resource_plans() + send_get_all_resource_plans() + return recv_get_all_resource_plans() + end + + def send_get_all_resource_plans() + send_message('get_all_resource_plans', Get_all_resource_plans_args) + end + + def recv_get_all_resource_plans() + result = receive_message(Get_all_resource_plans_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_all_resource_plans failed: unknown result') + end + + def alter_resource_plan(name, resourcePlan) + send_alter_resource_plan(name, resourcePlan) + recv_alter_resource_plan() + end + + def send_alter_resource_plan(name, resourcePlan) + send_message('alter_resource_plan', Alter_resource_plan_args, :name => name, :resourcePlan => resourcePlan) + end + + def recv_alter_resource_plan() + result = receive_message(Alter_resource_plan_result) + raise result.o1 unless result.o1.nil? + raise result.o3 unless result.o3.nil? + raise result.o2 unless result.o2.nil? + return + end + + def validate_resource_plan(name) + send_validate_resource_plan(name) + recv_validate_resource_plan() + end + + def send_validate_resource_plan(name) + send_message('validate_resource_plan', Validate_resource_plan_args, :name => name) + end + + def recv_validate_resource_plan() + result = receive_message(Validate_resource_plan_result) + raise result.o1 unless result.o1.nil? + raise result.o3 unless result.o3.nil? 
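+      # Neither exception slot was set, so the server call succeeded;
+      # validate_resource_plan is void and falls through to the bare return.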
+ return + end + + def drop_resource_plan(name) + send_drop_resource_plan(name) + recv_drop_resource_plan() + end + + def send_drop_resource_plan(name) + send_message('drop_resource_plan', Drop_resource_plan_args, :name => name) + end + + def recv_drop_resource_plan() + result = receive_message(Drop_resource_plan_result) + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + return + end + end class Processor < ::FacebookService::Processor @@ -4684,6 +4782,84 @@ module ThriftHiveMetastore write_result(result, oprot, 'get_metastore_db_uuid', seqid) end + def process_create_resource_plan(seqid, iprot, oprot) + args = read_args(iprot, Create_resource_plan_args) + result = Create_resource_plan_result.new() + begin + @handler.create_resource_plan(args.resourcePlan) + rescue ::InvalidObjectException => o1 + result.o1 = o1 + rescue ::MetaException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'create_resource_plan', seqid) + end + + def process_get_resource_plan(seqid, iprot, oprot) + args = read_args(iprot, Get_resource_plan_args) + result = Get_resource_plan_result.new() + begin + result.success = @handler.get_resource_plan(args.name) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::MetaException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'get_resource_plan', seqid) + end + + def process_get_all_resource_plans(seqid, iprot, oprot) + args = read_args(iprot, Get_all_resource_plans_args) + result = Get_all_resource_plans_result.new() + begin + result.success = @handler.get_all_resource_plans() + rescue ::MetaException => o1 + result.o1 = o1 + end + write_result(result, oprot, 'get_all_resource_plans', seqid) + end + + def process_alter_resource_plan(seqid, iprot, oprot) + args = read_args(iprot, Alter_resource_plan_args) + result = Alter_resource_plan_result.new() + begin + @handler.alter_resource_plan(args.name, args.resourcePlan) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::InvalidOperationException => o3 + result.o3 = o3 + rescue ::MetaException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'alter_resource_plan', seqid) + end + + def process_validate_resource_plan(seqid, iprot, oprot) + args = read_args(iprot, Validate_resource_plan_args) + result = Validate_resource_plan_result.new() + begin + @handler.validate_resource_plan(args.name) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::MetaException => o3 + result.o3 = o3 + end + write_result(result, oprot, 'validate_resource_plan', seqid) + end + + def process_drop_resource_plan(seqid, iprot, oprot) + args = read_args(iprot, Drop_resource_plan_args) + result = Drop_resource_plan_result.new() + begin + @handler.drop_resource_plan(args.name) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::MetaException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'drop_resource_plan', seqid) + end + end # HELPER FUNCTIONS AND STRUCTURES @@ -10739,5 +10915,214 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Create_resource_plan_args + include ::Thrift::Struct, ::Thrift::Struct_Union + RESOURCEPLAN = 1 + + FIELDS = { + RESOURCEPLAN => {:type => ::Thrift::Types::STRUCT, :name => 'resourcePlan', :class => ::WMResourcePlan} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Create_resource_plan_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + O2 = 2 + + FIELDS = { + O1 => {:type => 
::Thrift::Types::STRUCT, :name => 'o1', :class => ::InvalidObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_resource_plan_args + include ::Thrift::Struct, ::Thrift::Struct_Union + NAME = 1 + + FIELDS = { + NAME => {:type => ::Thrift::Types::STRING, :name => 'name'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_resource_plan_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::WMResourcePlan}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_all_resource_plans_args + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_all_resource_plans_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRUCT, :class => ::WMResourcePlan}}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Alter_resource_plan_args + include ::Thrift::Struct, ::Thrift::Struct_Union + NAME = 1 + RESOURCEPLAN = 2 + + FIELDS = { + NAME => {:type => ::Thrift::Types::STRING, :name => 'name'}, + RESOURCEPLAN => {:type => ::Thrift::Types::STRUCT, :name => 'resourcePlan', :class => ::WMResourcePlan} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Alter_resource_plan_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + O3 = 2 + O2 = 3 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::InvalidOperationException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Validate_resource_plan_args + include ::Thrift::Struct, ::Thrift::Struct_Union + NAME = 1 + + FIELDS = { + NAME => {:type => ::Thrift::Types::STRING, :name => 'name'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Validate_resource_plan_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + O3 = 2 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Drop_resource_plan_args + include ::Thrift::Struct, ::Thrift::Struct_Union + NAME = 1 + + FIELDS = { + NAME => {:type => 
::Thrift::Types::STRING, :name => 'name'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Drop_resource_plan_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
 end
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index ffb2abdf62..6c2ff6b549 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -91,6 +91,7 @@ import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -108,6 +109,7 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.ResourceType;
 import org.apache.hadoop.hive.metastore.api.ResourceUri;
 import org.apache.hadoop.hive.metastore.api.Role;
@@ -149,6 +151,8 @@ import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics;
 import org.apache.hadoop.hive.metastore.model.MPartitionEvent;
 import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
+import org.apache.hadoop.hive.metastore.model.MWMResourcePlan;
+import org.apache.hadoop.hive.metastore.model.MWMResourcePlan.Status;
 import org.apache.hadoop.hive.metastore.model.MResourceUri;
 import org.apache.hadoop.hive.metastore.model.MRole;
 import org.apache.hadoop.hive.metastore.model.MRoleMap;
@@ -9310,4 +9314,221 @@ public static void setTwoMetastoreTesting(boolean twoMetastoreTesting) {
   Properties getProp() {
     return prop;
   }
+
+  @Override
+  public void createResourcePlan(WMResourcePlan resourcePlan) throws MetaException {
+    boolean commited = false;
+    String rpName = normalizeIdentifier(resourcePlan.getName());
+    Integer queryParallelism = resourcePlan.isSetQueryParallelism() ?
+        resourcePlan.getQueryParallelism() : null;
+    MWMResourcePlan rp = new MWMResourcePlan(rpName, queryParallelism, MWMResourcePlan.Status.DISABLED);
+    try {
+      openTransaction();
+      pm.makePersistent(rp);
+      commited = commitTransaction();
+    } finally {
+      if (!commited) {
+        rollbackTransaction();
+      }
+    }
+  }
+
+  private WMResourcePlan fromMResourcePlan(MWMResourcePlan mplan) {
+    if (mplan == null) {
+      return null;
+    }
+    WMResourcePlan rp = new WMResourcePlan();
+    rp.setName(mplan.getName());
+    rp.setStatus(mplan.getStatus().name());
+    if (mplan.getQueryParallelism() != null) {
+      rp.setQueryParallelism(mplan.getQueryParallelism());
+    }
+    return rp;
+  }
+
+  @Override
+  public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
+    WMResourcePlan resourcePlan = null;
+    boolean commited = false;
+    Query query = null;
+    try {
+      openTransaction();
+      name = normalizeIdentifier(name);
+      query = pm.newQuery(MWMResourcePlan.class, "name == rpname");
+      query.declareParameters("java.lang.String rpname");
+      query.setUnique(true);
+      MWMResourcePlan mResourcePlan = (MWMResourcePlan) query.execute(name);
+      pm.retrieve(mResourcePlan);
+      resourcePlan = fromMResourcePlan(mResourcePlan);
+      commited = commitTransaction();
+    } finally {
+      rollbackAndCleanup(commited, query);
+    }
+    if (resourcePlan == null) {
+      throw new NoSuchObjectException("There is no resource plan named " + name);
+    }
+    return resourcePlan;
+  }
+
+  @Override
+  public List<WMResourcePlan> getAllResourcePlans() throws MetaException {
+    List<WMResourcePlan> resourcePlans = new ArrayList<>();
+    boolean commited = false;
+    Query query = null;
+    try {
+      openTransaction();
+      query = pm.newQuery(MWMResourcePlan.class);
+      List<MWMResourcePlan> mplans = (List<MWMResourcePlan>) query.execute();
+      pm.retrieveAll(mplans);
+      commited = commitTransaction();
+      if (mplans != null) {
+        for (MWMResourcePlan mplan : mplans) {
+          resourcePlans.add(fromMResourcePlan(mplan));
+        }
+      }
+    } finally {
+      rollbackAndCleanup(commited, query);
+    }
+    return resourcePlans;
+  }
+
+  @Override
+  public void alterResourcePlan(String name, WMResourcePlan resourcePlan)
+      throws NoSuchObjectException, InvalidOperationException, MetaException {
+    name = normalizeIdentifier(name);
+    boolean commited = false;
+    Query query = null;
+    try {
+      openTransaction();
+      query = pm.newQuery(MWMResourcePlan.class, "name == rpName");
+      query.declareParameters("java.lang.String rpName");
+      query.setUnique(true);
+      MWMResourcePlan mResourcePlan = (MWMResourcePlan) query.execute(name);
+      if (mResourcePlan == null) {
+        throw new NoSuchObjectException("Cannot find resource plan: " + name);
+      }
+      if (!resourcePlan.getName().equals(name)) {
+        mResourcePlan.setName(resourcePlan.getName());
+      }
+      if (resourcePlan.isSetQueryParallelism()) {
+        mResourcePlan.setQueryParallelism(resourcePlan.getQueryParallelism());
+      }
+      if (resourcePlan.isSetStatus()) {
+        switchStatus(name, mResourcePlan, resourcePlan.getStatus());
+      }
+      commited = commitTransaction();
+    } finally {
+      rollbackAndCleanup(commited, query);
+    }
+  }
+
+  private void switchStatus(String name, MWMResourcePlan mResourcePlan, String status)
+      throws InvalidOperationException {
+    Status currentStatus = mResourcePlan.getStatus();
+    Status newStatus = null;
+    try {
+      newStatus = Status.valueOf(status);
+    } catch (IllegalArgumentException e) {
+      throw new InvalidOperationException("Invalid status: " + status);
+    }
+
+    if (newStatus == currentStatus) {
+      return;
+    }
+
+    // An ACTIVE plan cannot change status directly; activate another plan first.
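+    // Allowed transitions: ENABLED <-> DISABLED, and ENABLED -> ACTIVE (the
+    // previously ACTIVE plan, if any, is demoted back to ENABLED). DISABLED ->
+    // ACTIVE and any transition away from ACTIVE are rejected below.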
+    if (currentStatus == Status.ACTIVE) {
+      throw new InvalidOperationException(
+          "Resource plan " + name + " is active; activate another plan first.");
+    }
+
+    if ((currentStatus == Status.ENABLED && newStatus == Status.DISABLED) ||
+        (currentStatus == Status.DISABLED && newStatus == Status.ENABLED)) {
+      // An enabled plan can be disabled and a disabled plan enabled.
+      mResourcePlan.setStatus(newStatus);
+    } else if (currentStatus == Status.ENABLED && newStatus == Status.ACTIVE) {
+      // We can activate an enabled resource plan.
+      if (!isResourcePlanValid(mResourcePlan)) {
+        throw new InvalidOperationException("ResourcePlan: " + name + " is invalid.");
+      }
+      // Deactivate the currently active resource plan.
+      deactivateActiveResourcePlan();
+      mResourcePlan.setStatus(newStatus);
+    } else if (currentStatus == Status.DISABLED && newStatus == Status.ACTIVE) {
+      throw new InvalidOperationException(
+          "Cannot activate resource plan: " + name + "; enable it first.");
+    } else {
+      // Should not happen; guards against new Status values being added.
+      throw new InvalidOperationException("Cannot move resource plan: " + name +
+          " from " + currentStatus + " to " + newStatus);
+    }
+  }
+
+  private void deactivateActiveResourcePlan() {
+    boolean commited = false;
+    Query query = null;
+    try {
+      openTransaction();
+      query = pm.newQuery(MWMResourcePlan.class, "status == expectedStatus");
+      query.declareParameters(
+          "org.apache.hadoop.hive.metastore.model.MWMResourcePlan.Status expectedStatus");
+      query.setUnique(true);
+      MWMResourcePlan mResourcePlan = (MWMResourcePlan) query.execute(Status.ACTIVE);
+      // There may be no active resource plan to begin with.
+      if (mResourcePlan != null) {
+        mResourcePlan.setStatus(Status.ENABLED);
+      }
+      commited = commitTransaction();
+    } finally {
+      rollbackAndCleanup(commited, query);
+    }
+  }
+
+  private boolean isResourcePlanValid(MWMResourcePlan mResourcePlan) {
+    return true;
+  }
+
+  @Override
+  public void validateResourcePlan(String name)
+      throws NoSuchObjectException, InvalidObjectException, MetaException {
+    name = normalizeIdentifier(name);
+    boolean commited = false;
+    Query query = null;
+    try {
+      openTransaction();
+      query = pm.newQuery(MWMResourcePlan.class, "name == rpName");
+      query.declareParameters("java.lang.String rpName");
+      query.setUnique(true);
+      MWMResourcePlan mResourcePlan = (MWMResourcePlan) query.execute(name);
+      if (mResourcePlan == null) {
+        throw new NoSuchObjectException("Cannot find resourcePlan: " + name);
+      }
+      // Validate the resource plan.
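+      // Note: isResourcePlanValid is currently a stub that always returns
+      // true, so this check cannot fail yet.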
+      if (!isResourcePlanValid(mResourcePlan)) {
+        throw new InvalidObjectException("ResourcePlan: " + name + " is not valid");
+      }
+      commited = commitTransaction();
+    } finally {
+      rollbackAndCleanup(commited, query);
+    }
+  }
+
+  @Override
+  public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException {
+    name = normalizeIdentifier(name);
+    boolean commited = false;
+    Query query = null;
+    try {
+      openTransaction();
+      query = pm.newQuery(MWMResourcePlan.class);
+      query.setFilter("name == resourcePlanName && status != \"ACTIVE\"");
+      query.declareParameters("java.lang.String resourcePlanName");
+      if (query.deletePersistentAll(name) == 0) {
+        throw new NoSuchObjectException("Cannot find resourcePlan: " + name + ", or it is active");
+      }
+      commited = commitTransaction();
+    } finally {
+      rollbackAndCleanup(commited, query);
+    }
+  }
 }
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 0e6d8a4da8..fee35caa6a 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -54,6 +55,7 @@ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
@@ -744,4 +746,18 @@ void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[]
    * @throws MetaException
    */
   String getMetastoreDbUuid() throws MetaException;
+
+  void createResourcePlan(WMResourcePlan resourcePlan) throws MetaException;
+
+  WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException, MetaException;
+
+  List<WMResourcePlan> getAllResourcePlans() throws MetaException;
+
+  void alterResourcePlan(String name, WMResourcePlan resourcePlan)
+      throws NoSuchObjectException, InvalidOperationException, MetaException;
+
+  void validateResourcePlan(String name)
+      throws NoSuchObjectException, InvalidObjectException, MetaException;
+
+  void dropResourcePlan(String name) throws NoSuchObjectException, MetaException;
 }
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index e66052bf8b..de9fcfd6f3 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -73,6 +74,7 @@ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
@@ -2239,4 +2241,36 @@ private boolean waitForInit() throws MetaException {
   void setInitializedForTest() {
     sharedCacheWrapper.updateInitState(null, false);
   }
+
+  @Override
+  public void createResourcePlan(WMResourcePlan resourcePlan) throws MetaException {
+    rawStore.createResourcePlan(resourcePlan);
+  }
+
+  @Override
+  public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException, MetaException {
+    return rawStore.getResourcePlan(name);
+  }
+
+  @Override
+  public List<WMResourcePlan> getAllResourcePlans() throws MetaException {
+    return rawStore.getAllResourcePlans();
+  }
+
+  @Override
+  public void alterResourcePlan(String name, WMResourcePlan resourcePlan)
+      throws NoSuchObjectException, InvalidOperationException, MetaException {
+    rawStore.alterResourcePlan(name, resourcePlan);
+  }
+
+  @Override
+  public void validateResourcePlan(String name)
+      throws NoSuchObjectException, InvalidObjectException, MetaException {
+    rawStore.validateResourcePlan(name);
+  }
+
+  @Override
+  public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException {
+    rawStore.dropResourcePlan(name);
+  }
 }
diff --git standalone-metastore/src/main/thrift/hive_metastore.thrift standalone-metastore/src/main/thrift/hive_metastore.thrift
index f7f9363b4b..7a8a7a8cbd 100644
--- standalone-metastore/src/main/thrift/hive_metastore.thrift
+++ standalone-metastore/src/main/thrift/hive_metastore.thrift
@@ -1623,6 +1623,19 @@ service ThriftHiveMetastore extends fb303.FacebookService
   // Metastore DB properties
   string get_metastore_db_uuid() throws (1:MetaException o1)
+
+  // Resource plan APIs.
+  void create_resource_plan(1:WMResourcePlan resourcePlan) throws(1:InvalidObjectException o1, 2:MetaException o2)
+
+  WMResourcePlan get_resource_plan(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+
+  list<WMResourcePlan> get_all_resource_plans() throws(1:MetaException o1)
+
+  void alter_resource_plan(1:string name, 2:WMResourcePlan resourcePlan) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o3, 3:MetaException o2)
+
+  void validate_resource_plan(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o3)
+
+  void drop_resource_plan(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2)
 }

// * Note about the DDL_TIME: When creating or altering a table or a partition,
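Taken together, the Thrift service methods above, the generated clients, and the ObjectStore/CachedStore implementations give resource plans a simple client-side lifecycle. The following is a minimal sketch, not part of the patch: it assumes a metastore reachable through a default HiveConf, the string-valued status field that ObjectStore's switchStatus parses, and an illustrative plan name and parallelism value.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;

public class ResourcePlanExample {
  public static void main(String[] args) throws Exception {
    // Assumes hive-site.xml on the classpath points at a running metastore.
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      // New plans are persisted as DISABLED (see ObjectStore.createResourcePlan).
      WMResourcePlan plan = new WMResourcePlan();
      plan.setName("nightly_etl");   // illustrative name
      plan.setQueryParallelism(4);   // optional field
      client.createResourcePlan(plan);

      // A plan must be ENABLED before it can become ACTIVE; the alter object
      // carries the same name plus the fields to change.
      WMResourcePlan change = new WMResourcePlan();
      change.setName("nightly_etl");
      change.setStatus("ENABLED");
      client.alterResourcePlan("nightly_etl", change);

      client.validateResourcePlan("nightly_etl");
      System.out.println(client.getAllResourcePlans());

      // Dropping is refused for the ACTIVE plan; this one is only ENABLED.
      client.dropResourcePlan("nightly_etl");
    } finally {
      client.close();
    }
  }
}

Note that activation (status ACTIVE) is deliberately a two-step path: only an ENABLED plan can be activated, and activating one plan implicitly demotes the previously active plan to ENABLED, so at most one plan is ACTIVE at a time.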