diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java index 7e0ce0734b..dca5c6432d 100644 --- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java +++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java @@ -19,12 +19,7 @@ package org.apache.hive.hcatalog.listener; import org.apache.hadoop.hive.common.TableName; -import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec; -import org.apache.hadoop.hive.metastore.api.GetPartitionsProjectionSpec; -import org.apache.hadoop.hive.metastore.api.ISchemaName; -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; -import org.apache.hadoop.hive.metastore.api.Catalog; -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; +import org.apache.hadoop.hive.metastore.api.*; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -38,66 +33,6 @@ import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.RawStore; import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.CreationMetadata; -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.ISchema; -import org.apache.hadoop.hive.metastore.api.InvalidInputException; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PartitionEventType; -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.SchemaVersion; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.WMResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; -import org.apache.hadoop.hive.metastore.api.Role; -import 
org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; -import org.apache.hadoop.hive.metastore.api.RuntimeStat; -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.api.ScheduledQuery; -import org.apache.hadoop.hive.metastore.api.ScheduledQueryKey; -import org.apache.hadoop.hive.metastore.api.ScheduledQueryMaintenanceRequest; -import org.apache.hadoop.hive.metastore.api.ScheduledQueryPollRequest; -import org.apache.hadoop.hive.metastore.api.ScheduledQueryPollResponse; -import org.apache.hadoop.hive.metastore.api.ScheduledQueryProgressInfo; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableMeta; -import org.apache.hadoop.hive.metastore.api.Type; -import org.apache.hadoop.hive.metastore.api.UnknownDBException; -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; -import org.apache.hadoop.hive.metastore.api.UnknownTableException; -import org.apache.hadoop.hive.metastore.api.WMMapping; -import org.apache.hadoop.hive.metastore.api.WMPool; -import org.apache.hadoop.hive.metastore.api.WMNullablePool; -import org.apache.hadoop.hive.metastore.api.WriteEventInfo; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.ColStatsObjWithSourceInfo; import org.apache.thrift.TException; @@ -1377,6 +1312,11 @@ public void scheduledQueryProgress(ScheduledQueryProgressInfo info) throw new RuntimeException("unimplemented"); } + @Override + public void addReplicationMetric(ReplicationMetricList replicationMetricList) { + throw new RuntimeException("unimplemented"); + } + @Override public ScheduledQuery getScheduledQuery(ScheduledQueryKey scheduleKey) throws MetaException, NoSuchObjectException { throw new RuntimeException("unimplemented"); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java index 641df005ed..c082fd5c48 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java @@ -69,6 +69,9 @@ import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.parse.repl.load.EventDumpDirComparator; +import org.apache.hadoop.hive.ql.parse.repl.load.metric.BootstrapLoadMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.load.metric.IncrementalLoadMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.StatsUtils; @@ -429,8 +432,17 @@ private Task getReplLoadRootTask(String sourceDb, String replicadb, boolean isIn HiveConf confTemp = new HiveConf(); confTemp.set("hive.repl.enable.move.optimization", "true"); Path loadPath = new Path(tuple.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR); + ReplicationMetricCollector 
metricCollector; + if (isIncrementalDump) { + metricCollector = new IncrementalLoadMetricCollector(replicadb, tuple.dumpLocation, "", + confTemp); + } else { + metricCollector = new BootstrapLoadMetricCollector(replicadb, tuple.dumpLocation, "", + confTemp); + } ReplLoadWork replLoadWork = new ReplLoadWork(confTemp, loadPath.toString(), sourceDb, replicadb, - null, null, isIncrementalDump, Long.valueOf(tuple.lastReplId)); + null, null, isIncrementalDump, Long.valueOf(tuple.lastReplId), + 0L, metricCollector); Task replLoadTask = TaskFactory.get(replLoadWork, confTemp); replLoadTask.initialize(null, null, new TaskQueue(driver.getContext()), driver.getContext()); replLoadTask.executeTask(null); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java index a13d842183..3a59e22a0c 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java @@ -26,11 +26,20 @@ import org.apache.hadoop.hive.ql.exec.repl.ReplAck; import org.apache.hadoop.hive.ql.exec.repl.ReplDumpWork; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; +import org.apache.hadoop.hive.ql.parse.repl.metric.MetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metric; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Progress; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.scheduled.ScheduledQueryExecutionService; import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.security.UserGroupInformation; import org.junit.Before; import org.junit.After; +import org.junit.Assert; import org.junit.Test; import org.junit.BeforeClass; import org.junit.Ignore; @@ -41,6 +50,8 @@ import java.util.Base64; import java.util.Map; import java.util.HashMap; +import java.util.List; +import java.util.ArrayList; /** @@ -172,7 +183,12 @@ public void testAcidTablesReplLoadBootstrapIncr() throws Throwable { public void testExternalTablesReplLoadBootstrapIncr() throws Throwable { // Bootstrap String withClause = " WITH('" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname - + "'='/replica_external_base')"; + + "'='/replica_external_base', '" + HiveConf.ConfVars.REPL_INCLUDE_AUTHORIZATION_METADATA + + "' = 'true' ,'" + HiveConf.ConfVars.REPL_INCLUDE_ATLAS_METADATA + "' = 'true' , '" + + HiveConf.ConfVars.HIVE_IN_TEST + "' = 'true'" + ",'"+ HiveConf.ConfVars.REPL_ATLAS_ENDPOINT + + "' = 'http://localhost:21000/atlas'" + ",'"+ HiveConf.ConfVars.REPL_ATLAS_REPLICATED_TO_DB + "' = 'tgt'" + + ",'"+ HiveConf.ConfVars.REPL_SOURCE_CLUSTER_NAME + "' = 'cluster0'" + + ",'"+ HiveConf.ConfVars.REPL_TARGET_CLUSTER_NAME + "' = 'cluster1')"; primary.run("use " + primaryDbName) .run("create external table t2 (id int)") .run("insert into t2 values(1)") @@ -183,7 +199,7 @@ public void testExternalTablesReplLoadBootstrapIncr() throws Throwable { ReplDumpWork.injectNextDumpDirForTest(String.valueOf(next), true); primary.run("create scheduled query s1_t2 every 5 seconds as repl dump " + 
primaryDbName + withClause); replica.run("create scheduled query s2_t2 every 5 seconds as repl load " + primaryDbName + " INTO " - + replicatedDbName); + + replicatedDbName + withClause); Path dumpRoot = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR), Base64.getEncoder().encodeToString(primaryDbName.toLowerCase().getBytes(StandardCharsets.UTF_8.name()))); FileSystem fs = FileSystem.get(dumpRoot.toUri(), primary.hiveConf); @@ -196,7 +212,20 @@ public void testExternalTablesReplLoadBootstrapIncr() throws Throwable { .verifyResult("t2") .run("select id from t2 order by id") .verifyResults(new String[]{"1", "2"}); - + long lastReplId = Long.parseLong(primary.status(replicatedDbName).getOutput().get(0)); + DumpMetaData dumpMetaData = new DumpMetaData(ackPath.getParent(), primary.hiveConf); + List<ReplicationMetric> replicationMetrics = MetricCollector.getInstance().getMetrics(); + Assert.assertEquals(2, replicationMetrics.size()); + //Generate expected metrics + List<ReplicationMetric> expectedReplicationMetrics = new ArrayList<>(); + expectedReplicationMetrics.add(generateExpectedMetric("s1_t2", 0, primaryDbName, + Metadata.ReplicationType.BOOTSTRAP, ackPath.getParent().toString(), lastReplId, Status.SUCCESS, + generateDumpStages(true))); + expectedReplicationMetrics.add(generateExpectedMetric("s2_t2", + dumpMetaData.getDumpExecutionId(), replicatedDbName, + Metadata.ReplicationType.BOOTSTRAP, ackPath.getParent().toString(), lastReplId, Status.SUCCESS, + generateLoadStages(true))); + checkMetrics(expectedReplicationMetrics, replicationMetrics); // First incremental, after bootstrap primary.run("use " + primaryDbName) .run("insert into t2 values(3)") @@ -215,6 +244,130 @@ replica.run("drop scheduled query s2_t2"); } } + + private void checkMetrics(List<ReplicationMetric> expectedReplicationMetrics, + List<ReplicationMetric> actualMetrics) { + Assert.assertEquals(expectedReplicationMetrics.size(), actualMetrics.size()); + int metricCounter = 0; + for (ReplicationMetric actualMetric : actualMetrics) { + for (ReplicationMetric expecMetric : expectedReplicationMetrics) { + if (actualMetric.getPolicy().equalsIgnoreCase(expecMetric.getPolicy())) { + Assert.assertEquals(expecMetric.getDumpExecutionId(), actualMetric.getDumpExecutionId()); + Assert.assertEquals(expecMetric.getMetadata().getDbName(), actualMetric.getMetadata().getDbName()); + Assert.assertEquals(expecMetric.getMetadata().getLastReplId(), + actualMetric.getMetadata().getLastReplId()); + Assert.assertEquals(expecMetric.getMetadata().getStagingDir(), + actualMetric.getMetadata().getStagingDir()); + Assert.assertEquals(expecMetric.getMetadata().getReplicationType(), + actualMetric.getMetadata().getReplicationType()); + Assert.assertEquals(expecMetric.getProgress().getStatus(), actualMetric.getProgress().getStatus()); + Assert.assertEquals(expecMetric.getProgress().getStages().size(), + actualMetric.getProgress().getStages().size()); + List<Stage> expectedStages = expecMetric.getProgress().getStages(); + List<Stage> actualStages = actualMetric.getProgress().getStages(); + int counter = 0; + for (Stage actualStage : actualStages) { + for (Stage expeStage : expectedStages) { + if (actualStage.getName().equalsIgnoreCase(expeStage.getName())) { + Assert.assertEquals(expeStage.getStatus(), actualStage.getStatus()); + Assert.assertEquals(expeStage.getMetrics().size(), actualStage.getMetrics().size()); + for (Metric actMetric : actualStage.getMetrics()) { + for (Metric expMetric : expeStage.getMetrics()) { + if (actMetric.getName().equalsIgnoreCase(expMetric.getName())) {
+ Assert.assertEquals(expMetric.getTotalCount(), actMetric.getTotalCount()); + Assert.assertEquals(expMetric.getCurrentCount(), actMetric.getCurrentCount()); + } + } + } + counter++; + if (counter == actualStages.size()) { + break; + } + } + } + } + metricCounter++; + if (metricCounter == actualMetrics.size()) { + break; + } + } + } + } + } + + private List<Stage> generateLoadStages(boolean isBootstrap) { + List<Stage> stages = new ArrayList<>(); + //Ranger + Stage rangerDump = new Stage("RANGER_LOAD", Status.SUCCESS, 0); + Metric rangerMetric = new Metric(ReplUtils.MetricName.POLICIES.name(), 0); + rangerDump.addMetric(rangerMetric); + stages.add(rangerDump); + //Atlas + Stage atlasDump = new Stage("ATLAS_LOAD", Status.SUCCESS, 0); + Metric atlasMetric = new Metric(ReplUtils.MetricName.TAGS.name(), 0); + atlasDump.addMetric(atlasMetric); + stages.add(atlasDump); + //Hive + Stage replDump = new Stage("REPL_LOAD", Status.SUCCESS, 0); + if (isBootstrap) { + Metric hiveMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 1); + hiveMetric.setCurrentCount(1); + replDump.addMetric(hiveMetric); + hiveMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 0); + replDump.addMetric(hiveMetric); + } else { + Metric hiveMetric = new Metric(ReplUtils.MetricName.EVENTS.name(), 1); + hiveMetric.setCurrentCount(1); + replDump.addMetric(hiveMetric); + } + stages.add(replDump); + return stages; + } + + private List<Stage> generateDumpStages(boolean isBootstrap) { + List<Stage> stages = new ArrayList<>(); + //Ranger + Stage rangerDump = new Stage("RANGER_DUMP", Status.SUCCESS, 0); + Metric rangerMetric = new Metric(ReplUtils.MetricName.POLICIES.name(), 0); + rangerDump.addMetric(rangerMetric); + stages.add(rangerDump); + //Atlas + Stage atlasDump = new Stage("ATLAS_DUMP", Status.SUCCESS, 0); + Metric atlasMetric = new Metric(ReplUtils.MetricName.TAGS.name(), 0); + atlasDump.addMetric(atlasMetric); + stages.add(atlasDump); + //Hive + Stage replDump = new Stage("REPL_DUMP", Status.SUCCESS, 0); + if (isBootstrap) { + Metric hiveMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 1); + hiveMetric.setCurrentCount(1); + replDump.addMetric(hiveMetric); + hiveMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 0); + replDump.addMetric(hiveMetric); + } else { + Metric hiveMetric = new Metric(ReplUtils.MetricName.EVENTS.name(), 1); + hiveMetric.setCurrentCount(1); + replDump.addMetric(hiveMetric); + } + stages.add(replDump); + return stages; + } + + private ReplicationMetric generateExpectedMetric(String policy, long dumpExecId, String dbName, + Metadata.ReplicationType replicationType, String staging, + long lastReplId, Status status, List<Stage> stages) { + Metadata metadata = new Metadata(dbName, replicationType, staging); + metadata.setLastReplId(lastReplId); + ReplicationMetric replicationMetric = new ReplicationMetric(0, policy, dumpExecId, metadata); + Progress progress = new Progress(); + progress.setStatus(status); + for (Stage stage : stages) { + progress.addStage(stage); + } + replicationMetric.setProgress(progress); + return replicationMetric; + } + private void waitForAck(FileSystem fs, Path ackFile, long timeout) throws IOException { long oldTime = System.currentTimeMillis(); long sleepInterval = 2; diff --git a/metastore/scripts/upgrade/derby/058-HIVE-23516.derby.sql b/metastore/scripts/upgrade/derby/058-HIVE-23516.derby.sql new file mode 100644 index 0000000000..da0ecf285f --- /dev/null +++ b/metastore/scripts/upgrade/derby/058-HIVE-23516.derby.sql @@ -0,0 +1,13 @@
+--Create table replication metrics +CREATE TABLE "APP"."REPLICATION_METRICS" ( + "RM_SCHID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMPSCHID" bigint NOT NULL, + "RM_META" varchar(4000), + "RM_PROGRESS" varchar(4000), + PRIMARY KEY("RM_SCHID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "APP"."REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "APP"."REPLICATION_METRICS" ("RM_DUMPSCHID"); \ No newline at end of file diff --git a/metastore/scripts/upgrade/mssql/035-HIVE-23516.mssql.sql b/metastore/scripts/upgrade/mssql/035-HIVE-23516.mssql.sql new file mode 100644 index 0000000000..e259ebc752 --- /dev/null +++ b/metastore/scripts/upgrade/mssql/035-HIVE-23516.mssql.sql @@ -0,0 +1,13 @@ +--Create table replication metrics +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMPSCHID" bigint NOT NULL, + "RM_META" varchar(4000), + "RM_PROGRESS" varchar(4000), + PRIMARY KEY("RM_SCHID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMPSCHID"); \ No newline at end of file diff --git a/metastore/scripts/upgrade/mysql/050-HIVE-23516.mysql.sql b/metastore/scripts/upgrade/mysql/050-HIVE-23516.mysql.sql new file mode 100644 index 0000000000..e259ebc752 --- /dev/null +++ b/metastore/scripts/upgrade/mysql/050-HIVE-23516.mysql.sql @@ -0,0 +1,13 @@ +--Create table replication metrics +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMPSCHID" bigint NOT NULL, + "RM_META" varchar(4000), + "RM_PROGRESS" varchar(4000), + PRIMARY KEY("RM_SCHID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMPSCHID"); \ No newline at end of file diff --git a/metastore/scripts/upgrade/oracle/050-HIVE-23516.oracle.sql b/metastore/scripts/upgrade/oracle/050-HIVE-23516.oracle.sql new file mode 100644 index 0000000000..63b8809d88 --- /dev/null +++ b/metastore/scripts/upgrade/oracle/050-HIVE-23516.oracle.sql @@ -0,0 +1,13 @@ +--Create table replication metrics +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHID" number NOT NULL, + "RM_POLICY" varchar2(256) NOT NULL, + "RM_DUMPSCHID" number NOT NULL, + "RM_META" varchar2(4000), + "RM_PROGRESS" varchar2(4000), + PRIMARY KEY("RM_SCHID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMPSCHID"); \ No newline at end of file diff --git a/metastore/scripts/upgrade/postgres/049-HIVE-23516.postgres.sql b/metastore/scripts/upgrade/postgres/049-HIVE-23516.postgres.sql new file mode 100644 index 0000000000..e259ebc752 --- /dev/null +++ b/metastore/scripts/upgrade/postgres/049-HIVE-23516.postgres.sql @@ -0,0 +1,13 @@ +--Create table replication metrics +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMPSCHID" bigint NOT NULL, + "RM_META" varchar(4000), + "RM_PROGRESS" varchar(4000), + PRIMARY KEY("RM_SCHID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMPSCHID"); \ No newline at end of file diff --git a/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp b/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp
index c7338e129c..e64ba65ac8 100644 --- a/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp +++ b/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp @@ -153,7 +153,7 @@ const char* _kStageTypeNames[] = { "RANGER_DUMP", "RANGER_LOAD", "ATLAS_DUMP", - "ATLAS_LOAD + "ATLAS_LOAD" }; const std::map<int, const char*> _StageType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(23, _kStageTypeValues, _kStageTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java index be48f99c59..9f6ccb5533 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; import org.apache.hadoop.hive.ql.parse.repl.dump.log.AtlasDumpLogger; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,11 +48,12 @@ import java.net.MalformedURLException; import java.net.URL; import java.nio.charset.Charset; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; import java.util.Map; +import java.util.HashMap; import java.util.Set; +import java.util.List; +import java.util.Arrays; +import java.util.ArrayList; /** * Atlas Metadata Replication Dump Task. @@ -71,6 +73,9 @@ public int execute() { AtlasDumpLogger replLogger = new AtlasDumpLogger(atlasReplInfo.getSrcDB(), atlasReplInfo.getStagingDir().toString()); replLogger.startLog(); + Map<String, Long> metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.TAGS.name(), 0L); + work.getMetricCollector().reportStageStart(getName(), metricMap); atlasRestClient = new AtlasRestClientBuilder(atlasReplInfo.getAtlasEndpoint()) .getClient(atlasReplInfo.getConf()); AtlasRequestBuilder atlasRequestBuilder = new AtlasRequestBuilder(); @@ -81,10 +86,16 @@ LOG.debug("Finished dumping atlas metadata, total:{} bytes written", numBytesWritten); createDumpMetadata(atlasReplInfo, currentModifiedTime); replLogger.endLog(0L); + work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS); return 0; } catch (Exception e) { LOG.error("Exception while dumping atlas metadata", e); setException(e); + try { + work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); + } catch (SemanticException ex) { + LOG.error("Failed to collect Metrics ", ex); + } return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpWork.java index 3344152f43..3f10730be4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpWork.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.exec.repl; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -34,13 +35,16 @@ private final Path stagingDir; private final boolean bootstrap; private final Path prevAtlasDumpDir; + private final transient ReplicationMetricCollector metricCollector; - public AtlasDumpWork(String srcDB, Path stagingDir, boolean bootstrap, Path prevAtlasDumpDir) {
+ public AtlasDumpWork(String srcDB, Path stagingDir, boolean bootstrap, Path prevAtlasDumpDir, + ReplicationMetricCollector metricCollector) { this.srcDB = srcDB; this.stagingDir = stagingDir; this.bootstrap = bootstrap; this.prevAtlasDumpDir = prevAtlasDumpDir; + this.metricCollector = metricCollector; } public boolean isBootstrap() { @@ -58,4 +62,8 @@ public String getSrcDB() { public Path getStagingDir() { return stagingDir; } + + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java index fa18bf3236..487afc4eef 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java @@ -31,8 +31,8 @@ import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.EximUtil; import org.apache.hadoop.hive.ql.parse.SemanticException; - import org.apache.hadoop.hive.ql.parse.repl.load.log.AtlasLoadLogger; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,6 +44,8 @@ import java.net.MalformedURLException; import java.net.URL; import java.nio.charset.Charset; +import java.util.HashMap; +import java.util.Map; /** * Atlas Metadata Replication Load Task. @@ -56,6 +58,9 @@ public int execute() { try { AtlasReplInfo atlasReplInfo = createAtlasReplInfo(); + Map<String, Long> metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.TAGS.name(), 0L); + work.getMetricCollector().reportStageStart(getName(), metricMap); LOG.info("Loading atlas metadata from srcDb: {} to tgtDb: {} from staging: {}", atlasReplInfo.getSrcDB(), atlasReplInfo.getTgtDB(), atlasReplInfo.getStagingDir()); AtlasLoadLogger replLogger = new AtlasLoadLogger(atlasReplInfo.getSrcDB(), atlasReplInfo.getTgtDB(), @@ -63,11 +68,18 @@ replLogger.startLog(); int importCount = importAtlasMetadata(atlasReplInfo); replLogger.endLog(importCount); + work.getMetricCollector().reportStageProgress(getName(), ReplUtils.MetricName.TAGS.name(), importCount); LOG.info("Atlas entities import count {}", importCount); + work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS); return 0; } catch (Exception e) { LOG.error("Exception while loading atlas metadata", e); setException(e); + try { + work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); + } catch (SemanticException ex) { + LOG.error("Failed to collect Metrics ", ex); + } return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadWork.java index 4dc1ea81a6..817c214675 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadWork.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.exec.repl; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -33,11 +34,13 @@ private final String srcDB; private final String tgtDB; private final Path stagingDir; + private final transient ReplicationMetricCollector metricCollector;
- public AtlasLoadWork(String srcDB, String tgtDB, Path stagingDir) { + public AtlasLoadWork(String srcDB, String tgtDB, Path stagingDir, ReplicationMetricCollector metricCollector) { this.srcDB = srcDB; this.tgtDB = tgtDB; this.stagingDir = stagingDir; + this.metricCollector = metricCollector; } public static long getSerialVersionUID() { @@ -55,4 +58,8 @@ public String getTgtDB() { public Path getStagingDir() { return stagingDir; } + + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java index 5a56a6be95..92ca6ea6ed 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java @@ -33,13 +33,16 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; import org.apache.hadoop.hive.ql.parse.repl.dump.log.RangerDumpLogger; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.Serializable; import java.net.URL; +import java.util.HashMap; import java.util.List; +import java.util.Map; /** * RangerDumpTask. @@ -77,6 +80,11 @@ long exportCount = 0; Path filePath = null; LOG.info("Exporting Ranger Metadata"); + Map<String, Long> metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.POLICIES.name(), 0L); + work.getMetricCollector().reportStageStart(getName(), metricMap); + replLogger = new RangerDumpLogger(work.getDbName(), work.getCurrentDumpPath().toString()); + replLogger.startLog(); if (rangerRestClient == null) { rangerRestClient = getRangerRestClient(); } @@ -91,8 +99,6 @@ if (StringUtils.isEmpty(rangerEndpoint) || !rangerRestClient.checkConnection(rangerEndpoint)) { throw new SemanticException("Ranger endpoint is not valid " + rangerEndpoint); } - replLogger = new RangerDumpLogger(work.getDbName(), work.getCurrentDumpPath().toString()); - replLogger.startLog(); RangerExportPolicyList rangerExportPolicyList = rangerRestClient.exportRangerPolicies(rangerEndpoint, work.getDbName(), rangerHiveServiceName); List<RangerPolicy> rangerPolicies = rangerExportPolicyList.getPolicies(); @@ -109,15 +115,22 @@ if (filePath != null) { LOG.info("Ranger policy export finished successfully"); exportCount = rangerExportPolicyList.getListSize(); + work.getMetricCollector().reportStageProgress(getName(), ReplUtils.MetricName.POLICIES.name(), exportCount); } } replLogger.endLog(exportCount); + work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS); LOG.debug("Ranger policy export filePath:" + filePath); LOG.info("Number of ranger policies exported {}", exportCount); return 0; } catch (Exception e) { LOG.error("failed", e); setException(e); + try { + work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); + } catch (SemanticException ex) { + LOG.error("Failed to collect Metrics ", ex); + } return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpWork.java index 026402b43e..b1393b20d5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpWork.java
@@ -19,6 +19,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain; import java.io.Serializable; @@ -36,10 +37,12 @@ private static final long serialVersionUID = 1L; private Path currentDumpPath; private String dbName; + private final transient ReplicationMetricCollector metricCollector; - public RangerDumpWork(Path currentDumpPath, String dbName) { + public RangerDumpWork(Path currentDumpPath, String dbName, ReplicationMetricCollector metricCollector) { this.currentDumpPath = currentDumpPath; this.dbName = dbName; + this.metricCollector = metricCollector; } public Path getCurrentDumpPath() { @@ -53,4 +56,8 @@ public String getDbName() { URL getRangerConfigResource() { return getClass().getClassLoader().getResource(ReplUtils.RANGER_CONFIGURATION_RESOURCE_NAME); } + + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java index 4e8a44fdae..fa57efd2fc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; import org.apache.hadoop.hive.ql.parse.repl.load.log.RangerLoadLogger; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,7 +41,9 @@ import java.io.Serializable; import java.net.URL; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_RANGER_ADD_DENY_POLICY_TARGET; /** @@ -101,6 +104,9 @@ public int execute() { replLogger = new RangerLoadLogger(work.getSourceDbName(), work.getTargetDbName(), work.getCurrentDumpPath().toString(), expectedPolicyCount); replLogger.startLog(); + Map<String, Long> metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.POLICIES.name(), (long) expectedPolicyCount); + work.getMetricCollector().reportStageStart(getName(), metricMap); if (rangerExportPolicyList != null && !CollectionUtils.isEmpty(rangerExportPolicyList.getPolicies())) { rangerPolicies = rangerExportPolicyList.getPolicies(); } @@ -129,13 +135,20 @@ rangerHiveServiceName); LOG.info("Number of ranger policies imported {}", rangerExportPolicyList.getListSize()); importCount = rangerExportPolicyList.getListSize(); + work.getMetricCollector().reportStageProgress(getName(), ReplUtils.MetricName.POLICIES.name(), importCount); replLogger.endLog(importCount); LOG.info("Ranger policy import finished {} ", importCount); } + work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS); return 0; } catch (Exception e) { LOG.error("Failed", e); setException(e); + try { + work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); + } catch (SemanticException ex) { + LOG.error("Failed to collect Metrics", ex); + } return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadWork.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadWork.java index cddca6076a..f42575b85d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadWork.java @@ -19,6 +19,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,11 +41,14 @@ private Path currentDumpPath; private String targetDbName; private String sourceDbName; + private final transient ReplicationMetricCollector metricCollector; - public RangerLoadWork(Path currentDumpPath, String sourceDbName, String targetDbName) { + public RangerLoadWork(Path currentDumpPath, String sourceDbName, String targetDbName, + ReplicationMetricCollector metricCollector) { this.currentDumpPath = currentDumpPath; this.targetDbName = targetDbName; this.sourceDbName = sourceDbName; + this.metricCollector = metricCollector; } public Path getCurrentDumpPath() { @@ -62,4 +66,8 @@ public String getSourceDbName() { URL getRangerConfigResource() { return getClass().getClassLoader().getResource(ReplUtils.RANGER_CONFIGURATION_RESOURCE_NAME); } + + ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java index 046b6a00de..3057cdb7f1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.ReplChangeManager; @@ -36,6 +37,7 @@ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.messaging.event.filters.AndFilter; import org.apache.hadoop.hive.metastore.messaging.event.filters.EventBoundaryFilter; import org.apache.hadoop.hive.metastore.messaging.event.filters.ReplEventFilter; @@ -71,7 +73,11 @@ import org.apache.hadoop.hive.ql.parse.repl.dump.io.JsonWriter; import org.apache.hadoop.hive.ql.parse.repl.dump.log.BootstrapDumpLogger; import org.apache.hadoop.hive.ql.parse.repl.dump.log.IncrementalDumpLogger; +import org.apache.hadoop.hive.ql.parse.repl.dump.metric.BootstrapDumpMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.ExportWork.MmContext; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.thrift.TException; @@ -97,6 +103,8 @@ import java.util.LinkedList; import java.util.UUID; import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; import 
java.util.concurrent.TimeUnit; import static org.apache.hadoop.hive.ql.exec.repl.ReplExternalTables.Writer; import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT; @@ -147,6 +155,7 @@ public int execute() { Path currentDumpPath = getCurrentDumpPath(dumpRoot, isBootstrap); Path hiveDumpRoot = new Path(currentDumpPath, ReplUtils.REPL_HIVE_BASE_DIR); work.setCurrentDumpPath(currentDumpPath); + work.setMetricCollector(initMetricCollection(isBootstrap, hiveDumpRoot)); if (shouldDumpAtlasMetadata()) { addAtlasDumpTask(isBootstrap, previousValidHiveDumpPath); LOG.info("Added task to dump atlas metadata."); @@ -174,6 +183,11 @@ } catch (Exception e) { LOG.error("failed", e); setException(e); + try { + work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); + } catch (SemanticException ex) { + LOG.error("Failed to collect Metrics", ex); + } return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); } return 0; @@ -183,7 +197,8 @@ private void initiateAuthorizationDumpTask() throws SemanticException { if (RANGER_AUTHORIZER.equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.REPL_AUTHORIZATION_PROVIDER_SERVICE))) { Path rangerDumpRoot = new Path(work.getCurrentDumpPath(), ReplUtils.REPL_RANGER_BASE_DIR); LOG.info("Exporting Authorization Metadata from {} at {} ", RANGER_AUTHORIZER, rangerDumpRoot); - RangerDumpWork rangerDumpWork = new RangerDumpWork(rangerDumpRoot, work.dbNameOrPattern); + RangerDumpWork rangerDumpWork = new RangerDumpWork(rangerDumpRoot, work.dbNameOrPattern, + work.getMetricCollector()); Task rangerDumpTask = TaskFactory.get(rangerDumpWork, conf); if (childTasks == null) { childTasks = new ArrayList<>(); @@ -240,7 +255,8 @@ private void addAtlasDumpTask(boolean bootstrap, Path prevHiveDumpDir) { Path atlasDumpDir = new Path(work.getCurrentDumpPath(), ReplUtils.REPL_ATLAS_BASE_DIR); Path prevAtlasDumpDir = prevHiveDumpDir == null ? null : new Path(prevHiveDumpDir.getParent(), ReplUtils.REPL_ATLAS_BASE_DIR); - AtlasDumpWork atlasDumpWork = new AtlasDumpWork(work.dbNameOrPattern, atlasDumpDir, bootstrap, prevAtlasDumpDir); + AtlasDumpWork atlasDumpWork = new AtlasDumpWork(work.dbNameOrPattern, atlasDumpDir, bootstrap, prevAtlasDumpDir, + work.getMetricCollector()); Task atlasDumpTask = TaskFactory.get(atlasDumpWork, conf); childTasks = new ArrayList<>(); childTasks.add(atlasDumpTask); @@ -253,6 +269,7 @@ private void finishRemainingTasks() throws SemanticException { + ReplAck.DUMP_ACKNOWLEDGEMENT.toString()); Utils.create(dumpAckFile, conf); prepareReturnValues(work.getResultValues()); + work.getMetricCollector().reportEnd(Status.SUCCESS); deleteAllPreviousDumpMeta(work.getCurrentDumpPath()); } @@ -449,9 +466,7 @@ private Long incrementalDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive long bootDumpBeginReplId = -1; List managedTableCopyPaths = Collections.emptyList(); List extTableCopyWorks = Collections.emptyList(); - List<String> tableList = work.replScope.includeAllTables() ? null : new ArrayList<>(); - // If we are bootstrapping ACID tables, we need to perform steps similar to a regular // bootstrap (See bootstrapDump() for more details. Only difference here is instead of
// waiting for the concurrent transactions to finish, we start dumping the incremental events @@ -465,29 +480,21 @@ HiveConf.ConfVars.REPL_BOOTSTRAP_DUMP_OPEN_TXN_TIMEOUT, TimeUnit.MILLISECONDS); waitUntilTime = System.currentTimeMillis() + timeoutInMs; } - // TODO : instead of simply restricting by message format, we should eventually // move to a jdbc-driver-style registering of message format, and picking message // factory per event to decode. For now, however, since all messages have the // same factory, restricting by message format is effectively a guard against // older leftover data that would cause us problems. - work.overrideLastEventToDump(hiveDb, bootDumpBeginReplId); - IMetaStoreClient.NotificationFilter evFilter = new AndFilter( new ReplEventFilter(work.replScope), new EventBoundaryFilter(work.eventFrom, work.eventTo)); - EventUtils.MSClientNotificationFetcher evFetcher = new EventUtils.MSClientNotificationFetcher(hiveDb); - - int maxEventLimit = getMaxEventAllowed(work.maxEventLimit()); EventUtils.NotificationEventIterator evIter = new EventUtils.NotificationEventIterator( evFetcher, work.eventFrom, maxEventLimit, evFilter); - lastReplId = work.eventTo; - Path ackFile = new Path(dumpRoot, ReplAck.EVENTS_DUMP.toString()); long resumeFrom = Utils.fileExists(ackFile, conf) ? getResumeFrom(ackFile) : work.eventFrom; @@ -499,10 +506,14 @@ String dbName = (null != work.dbNameOrPattern && !work.dbNameOrPattern.isEmpty()) ? work.dbNameOrPattern : "?"; - replLogger = new IncrementalDumpLogger(dbName, dumpRoot.toString(), - evFetcher.getDbNotificationEventsCount(work.eventFrom, dbName, work.eventTo, maxEventLimit), + long estimatedNumEvents = evFetcher.getDbNotificationEventsCount(work.eventFrom, dbName, work.eventTo, + maxEventLimit); + replLogger = new IncrementalDumpLogger(dbName, dumpRoot.toString(), estimatedNumEvents, work.eventFrom, work.eventTo, maxEventLimit); replLogger.startLog(); + Map<String, Long> metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.EVENTS.name(), estimatedNumEvents); + work.getMetricCollector().reportStageStart(getName(), metricMap); long dumpedCount = resumeFrom - work.eventFrom; if (dumpedCount > 0) { LOG.info("Event id {} to {} are already dumped, skipping {} events", work.eventFrom, resumeFrom, dumpedCount); @@ -518,19 +529,16 @@ dumpEvent(ev, evRoot, dumpRoot, cmRoot, hiveDb); Utils.writeOutput(String.valueOf(lastReplId), ackFile, conf); } - replLogger.endLog(lastReplId.toString()); - LOG.info("Done dumping events, preparing to return {},{}", dumpRoot.toUri(), lastReplId); - dmd.setDump(DumpType.INCREMENTAL, work.eventFrom, lastReplId, cmRoot); - + long executorId = conf.getLong(Constants.SCHEDULED_QUERY_EXECUTIONID, 0L); + dmd.setDump(DumpType.INCREMENTAL, work.eventFrom, lastReplId, cmRoot, executorId); // If repl policy is changed (oldReplScope is set), then pass the current replication policy, // so that REPL LOAD would drop the tables which are not included in current policy. if (work.oldReplScope != null) { dmd.setReplScope(work.replScope); } dmd.write(true); - // Examine all the tables if required. if (shouldExamineTablesToDump() || (tableList != null)) { // If required wait more for any transactions open at the time of starting the ACID bootstrap. 
@@ -538,7 +546,6 @@ assert (waitUntilTime > 0); validTxnList = getValidTxnListForReplDump(hiveDb, waitUntilTime); } - /* When same dump dir is resumed because of check-pointing, we need to clear the existing metadata. We need to rewrite the metadata as the write id list will be changed. We can't reuse the previous write id as it might be invalid due to compaction. */ @@ -587,9 +594,20 @@ } work.setDirCopyIterator(extTableCopyWorks.iterator()); work.setManagedTableCopyPathIterator(managedTableCopyPaths.iterator()); + work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS, lastReplId); return lastReplId; } + private ReplicationMetricCollector initMetricCollection(boolean isBootstrap, Path dumpRoot) { + ReplicationMetricCollector collector; + if (isBootstrap) { + collector = new BootstrapDumpMetricCollector(work.dbNameOrPattern, dumpRoot.toString(), conf); + } else { + collector = new IncrementalDumpMetricCollector(work.dbNameOrPattern, dumpRoot.toString(), conf); + } + return collector; + } + private int getMaxEventAllowed(int currentEventMaxLimit) { int maxDirItems = Integer.parseInt(conf.get(ReplUtils.DFS_MAX_DIR_ITEMS_CONFIG, "0")); if (maxDirItems > 0) { @@ -603,7 +621,6 @@ } return currentEventMaxLimit; } - private void cleanFailedEventDirIfExists(Path dumpDir, long resumeFrom) throws IOException { Path nextEventRoot = new Path(dumpDir, String.valueOf(resumeFrom + 1)); Retry retriable = new Retry(IOException.class) { @@ -674,6 +691,7 @@ private void dumpEvent(NotificationEvent ev, Path evRoot, Path dumpRoot, Path cm ); EventHandler eventHandler = EventHandlerFactory.handlerFor(ev); eventHandler.handle(context); + work.getMetricCollector().reportStageProgress(getName(), ReplUtils.MetricName.EVENTS.name(), 1); replLogger.eventLog(String.valueOf(ev.getEventId()), eventHandler.dumpType().toString()); } @@ -779,10 +797,16 @@ Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive hiveDb) throw new HiveException("Replication dump not allowed for replicated database" + " with first incremental dump pending : " + dbName); } + int estimatedNumTables = Utils.getAllTables(hiveDb, dbName, work.replScope).size(); + int estimatedNumFunctions = hiveDb.getAllFunctions().size(); replLogger = new BootstrapDumpLogger(dbName, dumpRoot.toString(), - Utils.getAllTables(hiveDb, dbName, work.replScope).size(), - hiveDb.getAllFunctions().size()); + estimatedNumTables, + estimatedNumFunctions); replLogger.startLog(); + Map<String, Long> metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) estimatedNumTables); + metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) estimatedNumFunctions); + work.getMetricCollector().reportStageStart(getName(), metricMap); Path dbRoot = dumpDbMetadata(dbName, metadataPath, bootDumpBeginReplId, hiveDb); Path dbDataRoot = new Path(new Path(dumpRoot, EximUtil.DATA_PATH_NAME), dbName); dumpFunctionMetadata(dbName, dbRoot, hiveDb); @@ -841,11 +865,13 @@ Long bootDumpEndReplId = currentNotificationId(hiveDb); LOG.info("Preparing to return {},{}->{}", dumpRoot.toUri(), bootDumpBeginReplId, bootDumpEndReplId); - dmd.setDump(DumpType.BOOTSTRAP, bootDumpBeginReplId, bootDumpEndReplId, cmRoot); + long executorId = conf.getLong(Constants.SCHEDULED_QUERY_EXECUTIONID, 0L);
+ dmd.setDump(DumpType.BOOTSTRAP, bootDumpBeginReplId, bootDumpEndReplId, cmRoot, executorId); dmd.write(true); work.setDirCopyIterator(extTableCopyWorks.iterator()); work.setManagedTableCopyPathIterator(managedTableCopyPaths.iterator()); + work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS, bootDumpBeginReplId); return bootDumpBeginReplId; } @@ -912,7 +938,9 @@ Path dumpDbMetadata(String dbName, Path metadataRoot, long lastReplId, Hive hive MmContext mmCtx = MmContext.createIfNeeded(tableSpec.tableHandle); tuple.replicationSpec.setRepl(true); List managedTableCopyPaths = new TableExport( - exportPaths, tableSpec, tuple.replicationSpec, hiveDb, distCpDoAsUser, conf, mmCtx).write(false); + exportPaths, tableSpec, tuple.replicationSpec, hiveDb, distCpDoAsUser, + conf, mmCtx).write(false); + work.getMetricCollector().reportStageProgress(getName(), ReplUtils.MetricName.TABLES.name(), 1); replLogger.tableLog(tblName, tableSpec.tableHandle.getTableType()); if (tableSpec.tableHandle.getTableType().equals(TableType.EXTERNAL_TABLE) || Utils.shouldDumpMetaDataOnly(conf)) { @@ -1042,6 +1070,7 @@ void dumpFunctionMetadata(String dbName, Path dbMetadataRoot, Hive hiveDb) throw FunctionSerializer serializer = new FunctionSerializer(tuple.object, conf); serializer.writeTo(jsonWriter, tuple.replicationSpec); } + work.getMetricCollector().reportStageProgress(getName(), ReplUtils.MetricName.FUNCTIONS.name(), 1); replLogger.functionLog(functionName); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java index 86f92338a6..59cae6b9fa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.ql.exec.repl.util.TaskTracker; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.parse.EximUtil; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,6 +57,7 @@ private Path currentDumpPath; private List<String> resultValues; private boolean shouldOverwrite; + private transient ReplicationMetricCollector metricCollector; public static void injectNextDumpDirForTest(String dumpDir) { injectNextDumpDirForTest(dumpDir, false); @@ -190,4 +192,12 @@ public void setResultValues(List<String> resultValues) { public void setShouldOverwrite(boolean shouldOverwrite) { this.shouldOverwrite = shouldOverwrite; } + + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } + + public void setMetricCollector(ReplicationMetricCollector metricCollector) { + this.metricCollector = metricCollector; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java index 792e331884..37cc6cd454 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java @@ -54,12 +54,13 @@ import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.HiveTableName; -import 
org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; import org.apache.hadoop.hive.ql.parse.repl.load.MetaData; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.api.StageType; import java.io.IOException; @@ -120,10 +121,20 @@ public int execute() { } } catch (RuntimeException e) { LOG.error("replication failed with run time exception", e); + try { + work.getMetricCollector().reportEnd(Status.FAILED); + } catch (SemanticException ex) { + LOG.error("Failed to collect Metrics ", ex); + } throw e; } catch (Exception e) { LOG.error("replication failed", e); setException(e); + try { + work.getMetricCollector().reportEnd(Status.FAILED); + } catch (SemanticException ex) { + LOG.error("Failed to collect Metrics ", ex); + } return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); } } @@ -136,7 +147,8 @@ private void initiateAuthorizationLoadTask() throws SemanticException { if (RANGER_AUTHORIZER.equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.REPL_AUTHORIZATION_PROVIDER_SERVICE))) { Path rangerLoadRoot = new Path(new Path(work.dumpDirectory).getParent(), ReplUtils.REPL_RANGER_BASE_DIR); LOG.info("Adding Import Ranger Metadata Task from {} ", rangerLoadRoot); - RangerLoadWork rangerLoadWork = new RangerLoadWork(rangerLoadRoot, work.getSourceDbName(), work.dbNameToLoadIn); + RangerLoadWork rangerLoadWork = new RangerLoadWork(rangerLoadRoot, work.getSourceDbName(), work.dbNameToLoadIn, + work.getMetricCollector()); Task rangerLoadTask = TaskFactory.get(rangerLoadWork, conf); if (childTasks == null) { childTasks = new ArrayList<>(); @@ -151,7 +163,8 @@ private void initiateAuthorizationLoadTask() throws SemanticException { private void addAtlasLoadTask() throws HiveException { Path atlasDumpDir = new Path(new Path(work.dumpDirectory).getParent(), ReplUtils.REPL_ATLAS_BASE_DIR); LOG.info("Adding task to load Atlas metadata from {} ", atlasDumpDir); - AtlasLoadWork atlasLoadWork = new AtlasLoadWork(work.getSourceDbName(), work.dbNameToLoadIn, atlasDumpDir); + AtlasLoadWork atlasLoadWork = new AtlasLoadWork(work.getSourceDbName(), work.dbNameToLoadIn, atlasDumpDir, + work.getMetricCollector()); Task atlasLoadTask = TaskFactory.get(atlasLoadWork, conf); if (childTasks == null) { childTasks = new ArrayList<>(); @@ -228,7 +241,7 @@ a database ( directory ) tableTracker.addTask(createViewTask(tableEvent.getMetaData(), work.dbNameToLoadIn, conf)); } else { LoadTable loadTable = new LoadTable(tableEvent, loadContext, iterator.replLogger(), tableContext, - loadTaskTracker); + loadTaskTracker, work.getMetricCollector()); tableTracker = loadTable.tasks(work.isIncrementalLoad()); } @@ -254,7 +267,7 @@ a database ( directory ) // for a table we explicitly try to load partitions as there is no separate partitions events. 
LoadPartitions loadPartitions = new LoadPartitions(loadContext, iterator.replLogger(), loadTaskTracker, tableEvent, - work.dbNameToLoadIn, tableContext); + work.dbNameToLoadIn, tableContext, work.getMetricCollector()); TaskTracker partitionsTracker = loadPartitions.tasks(); partitionsPostProcessing(iterator, scope, loadTaskTracker, tableTracker, partitionsTracker); @@ -321,7 +334,7 @@ private TaskTracker addLoadPartitionTasks(Context loadContext, BootstrapEvent ne TableContext tableContext = new TableContext(dbTracker, work.dbNameToLoadIn); LoadPartitions loadPartitions = new LoadPartitions(loadContext, iterator.replLogger(), tableContext, loadTaskTracker, - event.asTableEvent(), work.dbNameToLoadIn, event.lastPartitionReplicated()); + event.asTableEvent(), work.dbNameToLoadIn, event.lastPartitionReplicated(), work.getMetricCollector()); /* the tableTracker here should be a new instance and not an existing one as this can only happen when we break in between loading partitions. @@ -348,7 +361,7 @@ private TaskTracker addLoadConstraintsTasks(Context loadContext, private TaskTracker addLoadFunctionTasks(Context loadContext, BootstrapEventsIterator iterator, BootstrapEvent next, TaskTracker dbTracker, Scope scope) throws IOException, SemanticException { LoadFunction loadFunction = new LoadFunction(loadContext, iterator.replLogger(), - (FunctionEvent) next, work.dbNameToLoadIn, dbTracker); + (FunctionEvent) next, work.dbNameToLoadIn, dbTracker, work.getMetricCollector()); TaskTracker functionsTracker = loadFunction.tasks(); if (!scope.database) { scope.rootTasks.addAll(functionsTracker.tasks()); @@ -442,7 +455,7 @@ private void createEndReplLogTask(Context context, Scope scope, Database dbInMetadata = work.databaseEvent(context.hiveConf).dbInMetadata(work.dbNameToLoadIn); dbProps = dbInMetadata.getParameters(); } - ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, dbProps); + ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, dbProps, work.getMetricCollector()); Task replLogTask = TaskFactory.get(replLogWork, conf); if (scope.rootTasks.isEmpty()) { scope.rootTasks.add(replLogTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java index 26cd59b082..43bf365b4f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java @@ -29,6 +29,8 @@ import org.apache.hadoop.hive.ql.exec.repl.incremental.IncrementalLoadTasksBuilder; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.EximUtil; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.session.LineageState; import org.apache.hadoop.hive.ql.exec.Task; @@ -45,6 +47,8 @@ final String dumpDirectory; private boolean lastReplIDUpdated; private String sourceDbName; + private Long dumpExecutionId; + private final transient ReplicationMetricCollector metricCollector; private final ConstraintEventsIterator constraintsIterator; private int loadTaskRunCount = 0; @@ -62,12 +66,17 @@ public ReplLoadWork(HiveConf hiveConf, String dumpDirectory, String sourceDbName, String dbNameToLoadIn, ReplScope currentReplScope, - LineageState lineageState, boolean isIncrementalDump, Long eventTo) throws IOException { + LineageState lineageState, boolean 
isIncrementalDump, Long eventTo, + Long dumpExecutionId, + ReplicationMetricCollector metricCollector) throws IOException, SemanticException { sessionStateLineageState = lineageState; this.dumpDirectory = dumpDirectory; this.dbNameToLoadIn = dbNameToLoadIn; this.currentReplScope = currentReplScope; this.sourceDbName = sourceDbName; + this.dumpExecutionId = dumpExecutionId; + this.metricCollector = metricCollector; + // If DB name is changed during REPL LOAD, then set it instead of referring to source DB name. if ((currentReplScope != null) && StringUtils.isNotBlank(dbNameToLoadIn)) { @@ -77,7 +86,7 @@ public ReplLoadWork(HiveConf hiveConf, String dumpDirectory, rootTask = null; if (isIncrementalDump) { incrementalLoadTasksBuilder = new IncrementalLoadTasksBuilder(dbNameToLoadIn, dumpDirectory, - new IncrementalLoadEventsIterator(dumpDirectory, hiveConf), hiveConf, eventTo); + new IncrementalLoadEventsIterator(dumpDirectory, hiveConf), hiveConf, eventTo, metricCollector); /* * If the current incremental dump also includes bootstrap for some tables, then create iterator @@ -87,7 +96,8 @@ public ReplLoadWork(HiveConf hiveConf, String dumpDirectory, FileSystem fs = incBootstrapDir.getFileSystem(hiveConf); if (fs.exists(incBootstrapDir)) { this.bootstrapIterator = new BootstrapEventsIterator( - new Path(incBootstrapDir, EximUtil.METADATA_PATH_NAME).toString(), dbNameToLoadIn, true, hiveConf); + new Path(incBootstrapDir, EximUtil.METADATA_PATH_NAME).toString(), dbNameToLoadIn, true, + hiveConf, metricCollector); this.constraintsIterator = new ConstraintEventsIterator(dumpDirectory, hiveConf); } else { this.bootstrapIterator = null; @@ -95,7 +105,7 @@ public ReplLoadWork(HiveConf hiveConf, String dumpDirectory, } } else { this.bootstrapIterator = new BootstrapEventsIterator(new Path(dumpDirectory, EximUtil.METADATA_PATH_NAME) - .toString(), dbNameToLoadIn, true, hiveConf); + .toString(), dbNameToLoadIn, true, hiveConf, metricCollector); this.constraintsIterator = new ConstraintEventsIterator( new Path(dumpDirectory, EximUtil.METADATA_PATH_NAME).toString(), hiveConf); incrementalLoadTasksBuilder = null; @@ -158,4 +168,12 @@ public void setLastReplIDUpdated(boolean lastReplIDUpdated) { public String getSourceDbName() { return sourceDbName; } + + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } + + public Long getDumpExecutionId() { + return dumpExecutionId; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java index 7ade7c07d7..240f5a7db6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java @@ -18,7 +18,9 @@ package org.apache.hadoop.hive.ql.exec.repl; +import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.api.StageType; import java.io.Serializable; @@ -34,7 +36,13 @@ @Override public int execute() { - work.replStateLog(); + try { + work.replStateLog(); + } catch (SemanticException e) { + LOG.error("Exception while logging metrics ", e); + setException(e); + return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); + } return 0; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java index 37725d68c6..d4b1e46b55 100644 --- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java @@ -19,8 +19,13 @@ package org.apache.hadoop.hive.ql.exec.repl; import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.utils.StringUtils; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -43,6 +48,7 @@ private TableType tableType; private String functionName; private String lastReplId; + private final transient ReplicationMetricCollector metricCollector; private enum LOG_TYPE { TABLE, @@ -51,50 +57,62 @@ END } - public ReplStateLogWork(ReplLogger replLogger, String eventId, String eventType) { + public ReplStateLogWork(ReplLogger replLogger, ReplicationMetricCollector metricCollector, + String eventId, String eventType) { this.logType = LOG_TYPE.EVENT; this.replLogger = replLogger; this.eventId = eventId; this.eventType = eventType; + this.metricCollector = metricCollector; } - public ReplStateLogWork(ReplLogger replLogger, String tableName, TableType tableType) { + public ReplStateLogWork(ReplLogger replLogger, ReplicationMetricCollector metricCollector, + String tableName, TableType tableType) { this.logType = LOG_TYPE.TABLE; this.replLogger = replLogger; this.tableName = tableName; this.tableType = tableType; + this.metricCollector = metricCollector; } - public ReplStateLogWork(ReplLogger replLogger, String functionName) { + public ReplStateLogWork(ReplLogger replLogger, String functionName, ReplicationMetricCollector metricCollector) { this.logType = LOG_TYPE.FUNCTION; this.replLogger = replLogger; this.functionName = functionName; + this.metricCollector = metricCollector; } - public ReplStateLogWork(ReplLogger replLogger, Map dbProps) { + public ReplStateLogWork(ReplLogger replLogger, Map dbProps, + ReplicationMetricCollector metricCollector) { this.logType = LOG_TYPE.END; this.replLogger = replLogger; this.lastReplId = ReplicationSpec.getLastReplicatedStateFromParameters(dbProps); + this.metricCollector = metricCollector; } - public void replStateLog() { + public void replStateLog() throws SemanticException { switch (logType) { - case TABLE: { - replLogger.tableLog(tableName, tableType); - break; - } - case FUNCTION: { - replLogger.functionLog(functionName); - break; - } - case EVENT: { - replLogger.eventLog(eventId, eventType); - break; - } - case END: { - replLogger.endLog(lastReplId); - break; + case TABLE: + replLogger.tableLog(tableName, tableType); + metricCollector.reportStageProgress("REPL_LOAD", ReplUtils.MetricName.TABLES.name(), 1); + break; + case FUNCTION: + replLogger.functionLog(functionName); + metricCollector.reportStageProgress("REPL_LOAD", ReplUtils.MetricName.FUNCTIONS.name(), 1); + break; + case EVENT: + replLogger.eventLog(eventId, eventType); + metricCollector.reportStageProgress("REPL_LOAD", ReplUtils.MetricName.EVENTS.name(), 1); + break; + case END: + replLogger.endLog(lastReplId); + if (StringUtils.isEmpty(lastReplId)) { + metricCollector.reportStageEnd("REPL_LOAD", Status.SUCCESS); + } else { + metricCollector.reportStageEnd("REPL_LOAD", 
Status.SUCCESS, Long.parseLong(lastReplId)); } + metricCollector.reportEnd(Status.SUCCESS); + break; } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java index 5bbe20c8c6..99355ec4e0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java @@ -23,13 +23,17 @@ import org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.ReplicationState; import org.apache.hadoop.hive.ql.exec.repl.bootstrap.events.BootstrapEvent; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.load.log.BootstrapLoadLogger; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import java.io.IOException; import java.util.Arrays; import java.util.Iterator; import java.util.List; +import java.util.Map; +import java.util.HashMap; import java.util.function.Consumer; import java.util.stream.Collectors; @@ -76,9 +80,12 @@ private final HiveConf hiveConf; private final boolean needLogger; private ReplLogger replLogger; + private final transient ReplicationMetricCollector metricCollector; - public BootstrapEventsIterator(String dumpDirectory, String dbNameToLoadIn, boolean needLogger, HiveConf hiveConf) + public BootstrapEventsIterator(String dumpDirectory, String dbNameToLoadIn, boolean needLogger, HiveConf hiveConf, + ReplicationMetricCollector metricCollector) throws IOException { + this.metricCollector = metricCollector; Path path = new Path(dumpDirectory); FileSystem fileSystem = path.getFileSystem(hiveConf); if (!fileSystem.exists(path)) { @@ -123,6 +130,7 @@ public boolean hasNext() { if (needLogger) { initReplLogger(); } + initMetricCollector(); } else { return false; } @@ -161,17 +169,16 @@ public ReplLogger replLogger() { return replLogger; } + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } + private void initReplLogger() { try { Path dbDumpPath = currentDatabaseIterator.dbLevelPath(); FileSystem fs = dbDumpPath.getFileSystem(hiveConf); - - long numTables = getSubDirs(fs, dbDumpPath).length; - long numFunctions = 0; - Path funcPath = new Path(dbDumpPath, ReplUtils.FUNCTIONS_ROOT_DIR_NAME); - if (fs.exists(funcPath)) { - numFunctions = getSubDirs(fs, funcPath).length; - } + long numTables = getNumTables(dbDumpPath, fs); + long numFunctions = getNumFunctions(dbDumpPath, fs); String dbName = StringUtils.isBlank(dbNameToLoadIn) ? 
dbDumpPath.getName() : dbNameToLoadIn;
       replLogger = new BootstrapLoadLogger(dbName, dumpDirectory, numTables, numFunctions);
       replLogger.startLog();
@@ -180,6 +187,35 @@ private void initReplLogger() {
     }
   }
 
+  private long getNumFunctions(Path dbDumpPath, FileSystem fs) throws IOException {
+    Path funcPath = new Path(dbDumpPath, ReplUtils.FUNCTIONS_ROOT_DIR_NAME);
+    if (fs.exists(funcPath)) {
+      return getSubDirs(fs, funcPath).length;
+    }
+    return 0;
+  }
+
+  private long getNumTables(Path dbDumpPath, FileSystem fs) throws IOException {
+    return getSubDirs(fs, dbDumpPath).length;
+  }
+
+  private void initMetricCollector() {
+    try {
+      Path dbDumpPath = currentDatabaseIterator.dbLevelPath();
+      FileSystem fs = dbDumpPath.getFileSystem(hiveConf);
+      long numTables = getNumTables(dbDumpPath, fs);
+      long numFunctions = getNumFunctions(dbDumpPath, fs);
+      Map<String, Long> metricMap = new HashMap<>();
+      metricMap.put(ReplUtils.MetricName.TABLES.name(), numTables);
+      metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), numFunctions);
+      metricCollector.reportStageStart("REPL_LOAD", metricMap);
+    } catch (IOException e) {
+      // Ignore the exception
+    } catch (SemanticException e) {
+      throw new RuntimeException("Failed to collect Metrics ", e);
+    }
+  }
+
   FileStatus[] getSubDirs(FileSystem fs, Path dirPath) throws IOException {
     return fs.listStatus(dirPath, new PathFilter() {
       @Override
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java
index 8815eeebe1..667ec7ff31 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.hive.ql.parse.repl.load.MetaData;
 import org.apache.hadoop.hive.ql.parse.repl.load.message.CreateFunctionHandler;
 import org.apache.hadoop.hive.ql.parse.repl.load.message.MessageHandler;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -55,19 +56,21 @@
   private final FunctionEvent event;
   private final String dbNameToLoadIn;
   private final TaskTracker tracker;
+  private final ReplicationMetricCollector metricCollector;
 
   public LoadFunction(Context context, ReplLogger replLogger, FunctionEvent event,
-                      String dbNameToLoadIn, TaskTracker existingTracker) {
+                      String dbNameToLoadIn, TaskTracker existingTracker, ReplicationMetricCollector metricCollector) {
     this.context = context;
     this.replLogger = replLogger;
     this.event = event;
     this.dbNameToLoadIn = dbNameToLoadIn;
     this.tracker = new TaskTracker(existingTracker);
+    this.metricCollector = metricCollector;
   }
 
   private void createFunctionReplLogTask(List<Task<?>> functionTasks, String functionName) {
-    ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, functionName);
+    ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, functionName, metricCollector);
     Task<?> replLogTask = TaskFactory.get(replLogWork, context.hiveConf);
     DAGTraversal.traverse(functionTasks, new AddDependencyToLeaves(replLogTask));
   }
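Editorial note: the same collector-threading pattern repeats through LoadPartitions and LoadTable below. Each load step hands its ReplicationMetricCollector to the ReplStateLogWork it schedules, so executing the log task both writes a repl log entry and bumps the matching REPL_LOAD stage metric. A minimal sketch of that wiring, assuming a ReplLogger, HiveConf and collector are in scope (the method name here is illustrative, not part of the patch):

```java
// Sketch only: the wiring pattern used by the load-side classes in this patch.
private Task<?> functionReplLogTask(ReplLogger replLogger, HiveConf conf,
    ReplicationMetricCollector collector, String functionName) {
  // The log work now carries the collector; when the task executes,
  // replStateLog() logs the function and increments the FUNCTIONS metric
  // of the REPL_LOAD stage by one.
  ReplStateLogWork logWork = new ReplStateLogWork(replLogger, functionName, collector);
  return TaskFactory.get(logWork, conf);
}
```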
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
index b36c4a531f..b78df44e84 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.repl.ReplLogger;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.plan.ImportTableDesc;
 import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc;
@@ -77,19 +78,22 @@
   private final TableEvent event;
   private final TaskTracker tracker;
   private final AlterTableAddPartitionDesc lastReplicatedPartition;
+  private final ReplicationMetricCollector metricCollector;
   private final ImportTableDesc tableDesc;
   private Table table;
 
   public LoadPartitions(Context context, ReplLogger replLogger, TaskTracker tableTracker,
                         TableEvent event, String dbNameToLoadIn,
-                        TableContext tableContext) throws HiveException {
-    this(context, replLogger, tableContext, tableTracker, event, dbNameToLoadIn, null);
+                        TableContext tableContext, ReplicationMetricCollector metricCollector) throws HiveException {
+    this(context, replLogger, tableContext, tableTracker, event, dbNameToLoadIn, null,
+        metricCollector);
   }
 
   public LoadPartitions(Context context, ReplLogger replLogger, TableContext tableContext,
                         TaskTracker limiter, TableEvent event, String dbNameToLoadIn,
-                        AlterTableAddPartitionDesc lastReplicatedPartition) throws HiveException {
+                        AlterTableAddPartitionDesc lastReplicatedPartition,
+                        ReplicationMetricCollector metricCollector) throws HiveException {
     this.tracker = new TaskTracker(limiter);
     this.event = event;
     this.context = context;
@@ -99,6 +103,7 @@ public LoadPartitions(Context context, ReplLogger replLogger, TableContext table
 
     this.tableDesc = event.tableDesc(dbNameToLoadIn);
     this.table = ImportSemanticAnalyzer.tableIfExists(tableDesc, context.hiveDb);
+    this.metricCollector = metricCollector;
   }
 
   public TaskTracker tasks() throws Exception {
@@ -118,7 +123,7 @@ public TaskTracker tasks() throws Exception {
         if (!forNewTable().hasReplicationState()) {
           // Add ReplStateLogTask only if no pending table load tasks left for next cycle
           Task<?> replLogTask
-                  = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf);
+                  = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector);
           tracker.addDependentTask(replLogTask);
         }
         return tracker;
@@ -132,7 +137,7 @@ public TaskTracker tasks() throws Exception {
         if (!forExistingTable(lastReplicatedPartition).hasReplicationState()) {
           // Add ReplStateLogTask only if no pending table load tasks left for next cycle
           Task<?> replLogTask
-                  = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf);
+                  = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector);
           tracker.addDependentTask(replLogTask);
         }
         return tracker;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
index 6cea22c01f..9e236fd697 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.repl.ReplLogger;
+import
org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; @@ -73,14 +74,16 @@ private final TableContext tableContext; private final TaskTracker tracker; private final TableEvent event; + private final ReplicationMetricCollector metricCollector; public LoadTable(TableEvent event, Context context, ReplLogger replLogger, - TableContext tableContext, TaskTracker limiter) { + TableContext tableContext, TaskTracker limiter, ReplicationMetricCollector metricCollector) { this.event = event; this.context = context; this.replLogger = replLogger; this.tableContext = tableContext; this.tracker = new TaskTracker(limiter); + this.metricCollector = metricCollector; } public TaskTracker tasks(boolean isBootstrapDuringInc) throws Exception { @@ -151,7 +154,7 @@ public TaskTracker tasks(boolean isBootstrapDuringInc) throws Exception { ); if (!isPartitioned(tableDesc)) { Task replLogTask - = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf); + = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector); ckptTask.addDependentTask(replLogTask); } tracker.addDependentTask(ckptTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java index 7e844d3164..e4d20d1e75 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.repl.ReplStateLogWork; import org.apache.hadoop.hive.ql.exec.repl.util.AddDependencyToLeaves; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.exec.repl.util.TaskTracker; import org.apache.hadoop.hive.ql.exec.util.DAGTraversal; import org.apache.hadoop.hive.ql.hooks.ReadEntity; @@ -49,6 +50,7 @@ import org.apache.hadoop.hive.ql.parse.repl.load.UpdatedMetaDataTracker; import org.apache.hadoop.hive.ql.parse.repl.load.log.IncrementalLoadLogger; import org.apache.hadoop.hive.ql.parse.repl.load.message.MessageHandler; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; import org.apache.hadoop.hive.ql.plan.ReplTxnWork; import org.slf4j.Logger; @@ -73,9 +75,12 @@ private final ReplLogger replLogger; private static long numIteration; private final Long eventTo; + private final ReplicationMetricCollector metricCollector; public IncrementalLoadTasksBuilder(String dbName, String loadPath, - IncrementalLoadEventsIterator iterator, HiveConf conf, Long eventTo) { + IncrementalLoadEventsIterator iterator, HiveConf conf, + Long eventTo, + ReplicationMetricCollector metricCollector) { this.dbName = dbName; this.iterator = iterator; inputs = new HashSet<>(); @@ -85,7 +90,8 @@ public IncrementalLoadTasksBuilder(String dbName, String loadPath, replLogger = new IncrementalLoadLogger(dbName, loadPath, iterator.getNumEvents()); replLogger.startLog(); this.eventTo = eventTo; - numIteration = 0; + setNumIteration(0); + this.metricCollector = metricCollector; } public Task build(Context context, Hive hive, Logger log, @@ -96,7 +102,9 @@ public 
IncrementalLoadTasksBuilder(String dbName, String loadPath,
     this.log = log;
     numIteration++;
     this.log.debug("Iteration num " + numIteration);
-
+    Map<String, Long> metricMap = new HashMap<>();
+    metricMap.put(ReplUtils.MetricName.EVENTS.name(), (long) iterator.getNumEvents());
+    this.metricCollector.reportStageStart("REPL_LOAD", metricMap);
     while (iterator.hasNext() && tracker.canAddMoreTasks()) {
       FileStatus dir = iterator.next();
       String location = dir.getPath().toUri().toString();
@@ -135,7 +143,7 @@
       List<Task<?>> evTasks = analyzeEventLoad(mhContext);
 
       if ((evTasks != null) && (!evTasks.isEmpty())) {
-        ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger,
+        ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, metricCollector,
                 dir.getPath().getName(), eventDmd.getDumpType().toString());
         Task<?> barrierTask = TaskFactory.get(replStateLogWork, conf);
@@ -157,7 +165,7 @@
     Map<String, String> dbProps = new HashMap<>();
     dbProps.put(ReplicationSpec.KEY.CURR_STATE_ID.toString(), String.valueOf(lastReplayedEvent));
-    ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, dbProps);
+    ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, dbProps, metricCollector);
     Task<?> barrierTask = TaskFactory.get(replStateLogWork, conf);
     taskChainTail.addDependentTask(barrierTask);
     this.log.debug("Added {}:{} as a precursor of barrier task {}:{}",
@@ -364,6 +372,10 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbName
     return tasks;
   }
 
+  private static void setNumIteration(int count) {
+    numIteration = count;
+  }
+
   public Long eventTo() {
     return eventTo;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
index c0aadb5aa2..1bfe2edf35 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hive.ql.parse.EximUtil;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.repl.ReplLogger;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
 import org.apache.hadoop.hive.ql.plan.ReplTxnWork;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
@@ -134,6 +135,13 @@
     LOAD_NEW, LOAD_SKIP, LOAD_REPLACE
   }
 
+  /**
+   * Replication Metrics.
+   */
+  public enum MetricName {
+    TABLES, FUNCTIONS, EVENTS, POLICIES, TAGS
+  }
+
   public static Map<Integer, List<ExprNodeGenericFuncDesc>> genPartSpecs(
       Table table, List<Map<String, String>> partitions) throws SemanticException {
     Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs = new HashMap<>();
@@ -167,10 +175,12 @@
     return partSpecs;
   }
 
-  public static Task<?> getTableReplLogTask(ImportTableDesc tableDesc, ReplLogger replLogger, HiveConf conf)
+  public static Task<?> getTableReplLogTask(ImportTableDesc tableDesc, ReplLogger replLogger, HiveConf conf,
+                                            ReplicationMetricCollector metricCollector)
       throws SemanticException {
     TableType tableType = tableDesc.isExternal() ?
TableType.EXTERNAL_TABLE : tableDesc.tableType(); - ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, tableDesc.getTableName(), tableType); + ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, metricCollector, + tableDesc.getTableName(), tableType); return TaskFactory.get(replLogWork, conf); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java index 7959df2b2f..5936b8e809 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java @@ -24,10 +24,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.repl.ReplScope; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.ReplChangeManager; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.exec.Task; @@ -41,6 +43,9 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; +import org.apache.hadoop.hive.ql.parse.repl.load.metric.BootstrapLoadMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.load.metric.IncrementalLoadMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.PlanUtils; import java.io.IOException; @@ -398,7 +403,9 @@ private void analyzeReplLoad(ASTNode ast) throws SemanticException { ReplLoadWork replLoadWork = new ReplLoadWork(conf, loadPath.toString(), sourceDbNameOrPattern, replScope.getDbName(), dmd.getReplScope(), - queryState.getLineageState(), evDump, dmd.getEventTo()); + queryState.getLineageState(), evDump, dmd.getEventTo(), dmd.getDumpExecutionId(), + initMetricCollection(!evDump, loadPath.toString(), replScope.getDbName(), + dmd.getDumpExecutionId())); rootTasks.add(TaskFactory.get(replLoadWork, conf)); } else { LOG.warn("Previous Dump Already Loaded"); @@ -409,6 +416,17 @@ private void analyzeReplLoad(ASTNode ast) throws SemanticException { } } + private ReplicationMetricCollector initMetricCollection(boolean isBootstrap, String dumpDirectory, + String dbNameToLoadIn, long dumpExecutionId) { + ReplicationMetricCollector collector; + if (isBootstrap) { + collector = new BootstrapLoadMetricCollector(dbNameToLoadIn, dumpDirectory, dumpExecutionId, conf); + } else { + collector = new IncrementalLoadMetricCollector(dbNameToLoadIn, dumpDirectory, dumpExecutionId, conf); + } + return collector; + } + private Path getCurrentLoadPath() throws IOException, SemanticException { Path loadPathBase = new Path(conf.getVar(HiveConf.ConfVars.REPLDIR), Base64.getEncoder().encodeToString(sourceDbNameOrPattern.toLowerCase() diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/BootstrapDumpMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/BootstrapDumpMetricCollector.java new file mode 100644 index 0000000000..48a37b6171 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/BootstrapDumpMetricCollector.java @@ -0,0 
+1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.dump.metric; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; + +/** + * BootstrapDumpMetricCollector. + * Bootstrap Dump Metric Collector + */ +public class BootstrapDumpMetricCollector extends ReplicationMetricCollector { + public BootstrapDumpMetricCollector(String dbName, String stagingDir, HiveConf conf) { + super(dbName, Metadata.ReplicationType.BOOTSTRAP, stagingDir, 0, conf); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/IncrementalDumpMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/IncrementalDumpMetricCollector.java new file mode 100644 index 0000000000..f839ec1640 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/IncrementalDumpMetricCollector.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.dump.metric; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; + +/** + * IncrementalDumpMetricCollector. 
+ * Incremental Dump Metric Collector.
+ */
+public class IncrementalDumpMetricCollector extends ReplicationMetricCollector {
+  public IncrementalDumpMetricCollector(String dbName, String stagingDir, HiveConf conf) {
+    super(dbName, Metadata.ReplicationType.INCREMENTAL, stagingDir, 0, conf);
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java
index e538c79f34..dc40e1df9a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java
@@ -51,6 +51,7 @@
   private boolean initialized = false;
   private final Path dumpFile;
   private final HiveConf hiveConf;
+  private Long dumpExecutionId;
 
   public DumpMetaData(Path dumpRoot, HiveConf hiveConf) {
     this.hiveConf = hiveConf;
@@ -60,15 +61,16 @@ public DumpMetaData(Path dumpRoot, HiveConf hiveConf) {
   public DumpMetaData(Path dumpRoot, DumpType lvl, Long eventFrom, Long eventTo, Path cmRoot,
       HiveConf hiveConf) {
     this(dumpRoot, hiveConf);
-    setDump(lvl, eventFrom, eventTo, cmRoot);
+    setDump(lvl, eventFrom, eventTo, cmRoot, 0L);
   }
 
-  public void setDump(DumpType lvl, Long eventFrom, Long eventTo, Path cmRoot) {
+  public void setDump(DumpType lvl, Long eventFrom, Long eventTo, Path cmRoot, Long dumpExecutionId) {
     this.dumpType = lvl;
     this.eventFrom = eventFrom;
     this.eventTo = eventTo;
     this.cmRoot = cmRoot;
     this.initialized = true;
+    this.dumpExecutionId = dumpExecutionId;
   }
 
   public void setPayload(String payload) {
@@ -115,11 +117,11 @@ private void loadDumpFromFile() throws SemanticException {
       br = new BufferedReader(new InputStreamReader(fs.open(dumpFile)));
       String line;
       if ((line = br.readLine()) != null) {
-        String[] lineContents = line.split("\t", 5);
+        String[] lineContents = line.split("\t", 6);
         setDump(DumpType.valueOf(lineContents[0]), Long.valueOf(lineContents[1]),
             Long.valueOf(lineContents[2]),
-            new Path(lineContents[3]));
-        setPayload(lineContents[4].equals(Utilities.nullStringOutput) ? null : lineContents[4]);
+            new Path(lineContents[3]), Long.valueOf(lineContents[4]));
+        setPayload(lineContents[5].equals(Utilities.nullStringOutput) ? null : lineContents[5]);
       } else {
         throw new IOException(
             "Unable to read valid values from dumpFile:" + dumpFile.toUri().toString());
@@ -158,6 +160,11 @@ public Long getEventTo() throws SemanticException {
     return eventTo;
   }
 
+  public Long getDumpExecutionId() throws SemanticException {
+    initializeIfNot();
+    return dumpExecutionId;
+  }
+
   public ReplScope getReplScope() throws SemanticException {
     initializeIfNot();
     return replScope;
@@ -207,6 +214,7 @@ public void write(boolean replace) throws SemanticException {
         eventFrom.toString(),
         eventTo.toString(),
         cmRoot.toString(),
+        dumpExecutionId.toString(),
         payload)
     );
     if (replScope != null) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/BootstrapLoadMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/BootstrapLoadMetricCollector.java
new file mode 100644
index 0000000000..2a1b98e649
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/BootstrapLoadMetricCollector.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.load.metric; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; + +/** + * BootstrapLoadMetricCollector. + * Bootstrap Load Metric Collector + */ +public class BootstrapLoadMetricCollector extends ReplicationMetricCollector { + public BootstrapLoadMetricCollector(String dbName, String stagingDir, long dumpExecutionId, HiveConf conf) { + super(dbName, Metadata.ReplicationType.BOOTSTRAP, stagingDir, dumpExecutionId, conf); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/IncrementalLoadMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/IncrementalLoadMetricCollector.java new file mode 100644 index 0000000000..57c9720acb --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/IncrementalLoadMetricCollector.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.load.metric; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; + +/** + * IncrementalLoadMetricCollector. + * Incremental Load Metric Collector + */ +public class IncrementalLoadMetricCollector extends ReplicationMetricCollector { + public IncrementalLoadMetricCollector(String dbName, String stagingDir, long dumpExecutionId, HiveConf conf) { + super(dbName, Metadata.ReplicationType.INCREMENTAL, stagingDir, dumpExecutionId, conf); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/MetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/MetricCollector.java new file mode 100644 index 0000000000..54f1c3126f --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/MetricCollector.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.parse.repl.metric;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * MetricCollector.
+ * In-memory collection of metrics.
+ */
+public final class MetricCollector {
+  private static final Logger LOG = LoggerFactory.getLogger(MetricCollector.class);
+  private Map<Long, ReplicationMetric> metricMap = new ConcurrentHashMap<>();
+  private long maxSize = (long) MetastoreConf.ConfVars.REPL_METRICS_CACHE_MAXSIZE.getDefaultVal();
+  private boolean isInited = false;
+  private static volatile MetricCollector instance;
+
+  private MetricCollector() {
+  }
+
+  public static MetricCollector getInstance() {
+    if (instance == null) {
+      synchronized (MetricCollector.class) {
+        if (instance == null) {
+          instance = new MetricCollector();
+        }
+      }
+    }
+    return instance;
+  }
+
+  public synchronized MetricCollector init(HiveConf conf) {
+    // Can initialize the cache only once with a value.
+    if (!isInited) {
+      maxSize = MetastoreConf.getLongVar(conf, MetastoreConf.ConfVars.REPL_METRICS_CACHE_MAXSIZE);
+      isInited = true;
+    }
+    return instance;
+  }
+
+  public synchronized void addMetric(ReplicationMetric replicationMetric) throws SemanticException {
+    if (metricMap.size() > maxSize) {
+      throw new SemanticException("Metrics are not getting collected.");
+    } else {
+      if (metricMap.size() > 0.8 * maxSize) { // soft limit
+        LOG.warn("Metrics cache is more than 80% full. Will start dropping metrics once full.");
+      }
+      metricMap.put(replicationMetric.getScheduledExecutionId(), replicationMetric);
+    }
+  }
+
+  public synchronized List<ReplicationMetric> getMetrics() {
+    List<ReplicationMetric> metricList = new ArrayList<>(metricMap.values());
+    metricMap.clear();
+    return metricList;
+  }
+}
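Editorial note: the cache above is a process-wide singleton keyed by scheduled execution id, so addMetric is effectively an upsert (a later report for the same scheduled execution replaces the earlier snapshot), and getMetrics drains the map. A hedged usage sketch, assuming a HiveConf is in scope; the ids and names below are illustrative, not from the patch:

```java
// Sketch only: the intended add/drain cycle of the bounded in-memory cache.
MetricCollector cache = MetricCollector.getInstance().init(hiveConf);
ReplicationMetric metric = new ReplicationMetric(1L /* scheduled execution id */, "repl_policy1",
    11L /* dump execution id */, new Metadata("sales_db", Metadata.ReplicationType.BOOTSTRAP, "/staging/dir"));
cache.addMetric(metric);                              // upsert keyed by scheduled execution id; throws once full
List<ReplicationMetric> drained = cache.getMetrics(); // copies out all entries and clears the cache
```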
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/MetricSink.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/MetricSink.java
new file mode 100644
index 0000000000..61524fd4b1
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/MetricSink.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.metric;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.ReplicationMetricList;
+import org.apache.hadoop.hive.metastore.api.ReplicationMetrics;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.utils.Retry;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * MetricSink.
+ * Scheduled thread that polls from the Metric Collector and persists to DB.
+ */
+public class MetricSink {
+  private static final Logger LOG = LoggerFactory.getLogger(MetricSink.class);
+  private ScheduledExecutorService executorService;
+  private static volatile MetricSink instance;
+  private boolean isInitialised = false;
+  private HiveConf conf;
+
+  private MetricSink() {
+    this.executorService = Executors.newSingleThreadScheduledExecutor();
+  }
+
+  public static MetricSink getInstance() {
+    if (instance == null) {
+      synchronized (MetricSink.class) {
+        if (instance == null) {
+          instance = new MetricSink();
+        }
+      }
+    }
+    return instance;
+  }
+
+  public synchronized void init(HiveConf conf) {
+    if (!isInitialised) {
+      this.conf = conf;
+      long frequencyInMins = MetastoreConf.getLongVar(conf, MetastoreConf.ConfVars.REPL_METRICS_UPDATE_FREQUENCY);
+      // Poll periodically: a one-shot schedule() would flush the cache only once.
+      this.executorService.scheduleAtFixedRate(new MetricSinkWriter(), frequencyInMins, frequencyInMins,
+          TimeUnit.MINUTES);
+      isInitialised = true;
+    }
+  }
+
+  private class MetricSinkWriter implements Runnable {
+    private MetricCollector collector;
+
+    // writer instance
+
+    public MetricSinkWriter() {
+      this.collector = MetricCollector.getInstance();
+    }
+
+    @Override
+    public void run() {
+      // write metrics and retry
+      Retry<Void> retriable = new Retry<Void>(Exception.class) {
+        @Override
+        public Void execute() throws Exception {
+          // get metrics
+          List<ReplicationMetric> metrics = collector.getMetrics();
+          ReplicationMetricList metricList = new ReplicationMetricList();
+          List<ReplicationMetrics> replicationMetricsList = new ArrayList<>();
+          ObjectMapper mapper = new ObjectMapper();
+          for (ReplicationMetric metric : metrics) {
+            ReplicationMetrics persistentMetric = new ReplicationMetrics();
+            persistentMetric.setDumpExecutionId(metric.getDumpExecutionId());
+            persistentMetric.setScheduledExecutionId(metric.getScheduledExecutionId());
+            persistentMetric.setPolicy(metric.getPolicy());
+            persistentMetric.setProgress(mapper.writeValueAsString(metric.getProgress()));
+            persistentMetric.setMetadata(mapper.writeValueAsString(metric.getMetadata()));
+            replicationMetricsList.add(persistentMetric);
+          }
+          metricList.setReplicationMetricList(replicationMetricsList);
+          // write
+          Hive.get(conf).getMSC().addReplicationMetrics(metricList);
+          return null;
+        }
+      };
+      try {
+        retriable.run();
+      } catch (Exception e) {
+        throw new RuntimeException("Metrics are not getting persisted", e);
+      }
+    }
+  }
+}
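Editorial note: the writer above flattens each in-memory metric into the metastore's ReplicationMetrics Thrift object, with progress and metadata stored as JSON strings. A rough sketch of that serialization step, assuming a ReplicationMetric named metric is in scope; the exact shape simply follows Jackson's default bean serialization, so the sample output is illustrative:

```java
// Sketch only: the JSON flattening performed by MetricSinkWriter above.
ObjectMapper mapper = new ObjectMapper();
String progressJson = mapper.writeValueAsString(metric.getProgress());
// Roughly: {"status":"IN_PROGRESS","stages":[{"name":"REPL_LOAD","status":"IN_PROGRESS",
//   "startTime":...,"endTime":...,"metrics":[{"name":"TABLES","currentCount":1,"totalCount":10}]}]}
String metadataJson = mapper.writeValueAsString(metric.getMetadata());
// Roughly: {"dbName":"sales_db","replicationType":"BOOTSTRAP","stagingDir":"...","lastReplId":42}
```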
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java
new file mode 100644
index 0000000000..8ffb85d11f
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.metric;
+
+import org.apache.hadoop.hive.conf.Constants;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.utils.StringUtils;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.Progress;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metric;
+
+import java.util.Map;
+
+/**
+ * Abstract class for Replication Metric Collection.
+ */
+public abstract class ReplicationMetricCollector {
+  private ReplicationMetric replicationMetric;
+  private MetricCollector metricCollector;
+  private boolean isEnabled;
+
+  public ReplicationMetricCollector(String dbName, Metadata.ReplicationType replicationType,
+                                    String stagingDir, long dumpExecutionId, HiveConf conf) {
+    String policy = conf.get(Constants.SCHEDULED_QUERY_SCHEDULENAME);
+    long executionId = conf.getLong(Constants.SCHEDULED_QUERY_EXECUTIONID, 0L);
+    if (!StringUtils.isEmpty(policy) && executionId > 0) {
+      isEnabled = true;
+      metricCollector = MetricCollector.getInstance().init(conf);
+      MetricSink.getInstance().init(conf);
+      Metadata metadata = new Metadata(dbName, replicationType, stagingDir);
+      replicationMetric = new ReplicationMetric(executionId, policy, dumpExecutionId, metadata);
+    }
+  }
+
+  public void reportStageStart(String stageName, Map<String, Long> metricMap) throws SemanticException {
+    if (isEnabled) {
+      Progress progress = replicationMetric.getProgress();
+      Stage stage = new Stage(stageName, Status.IN_PROGRESS, System.currentTimeMillis());
+      for (Map.Entry<String, Long> metric : metricMap.entrySet()) {
+        stage.addMetric(new Metric(metric.getKey(), metric.getValue()));
+      }
+      progress.addStage(stage);
+      replicationMetric.setProgress(progress);
+      metricCollector.addMetric(replicationMetric);
+    }
+  }
+
+  public void reportStageEnd(String stageName, Status status, long lastReplId) throws SemanticException {
+    if (isEnabled) {
+      Progress progress = replicationMetric.getProgress();
+      Stage stage = progress.getStageByName(stageName);
+      stage.setStatus(status);
+      stage.setEndTime(System.currentTimeMillis());
+      progress.addStage(stage);
+      replicationMetric.setProgress(progress);
+      Metadata metadata = replicationMetric.getMetadata();
+      metadata.setLastReplId(lastReplId);
+      replicationMetric.setMetadata(metadata);
+      metricCollector.addMetric(replicationMetric);
+    }
+  }
+
+  public void reportStageEnd(String stageName, Status status) throws SemanticException {
+    if (isEnabled) {
+      Progress progress = replicationMetric.getProgress();
+      Stage stage = progress.getStageByName(stageName);
+      stage.setStatus(status);
+      stage.setEndTime(System.currentTimeMillis());
+      progress.addStage(stage);
+      replicationMetric.setProgress(progress);
+      metricCollector.addMetric(replicationMetric);
+    }
+  }
+
+  public void reportStageProgress(String stageName, String metricName, long count) throws SemanticException {
+    if (isEnabled) {
+      Progress progress = replicationMetric.getProgress();
+      Stage stage = progress.getStageByName(stageName);
+      Metric metric = stage.getMetricByName(metricName);
+      metric.setCurrentCount(metric.getCurrentCount() + count);
+      if (metric.getCurrentCount() > metric.getTotalCount()) {
+        metric.setTotalCount(metric.getCurrentCount());
+      }
+      stage.addMetric(metric);
+      progress.addStage(stage);
+      replicationMetric.setProgress(progress);
+      metricCollector.addMetric(replicationMetric);
+    }
+  }
+
+  public void reportEnd(Status status) throws SemanticException {
+    if (isEnabled) {
+      Progress progress = replicationMetric.getProgress();
+      progress.setStatus(status);
+      replicationMetric.setProgress(progress);
+      metricCollector.addMetric(replicationMetric);
+    }
+  }
+}
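Editorial note: the collector exposes a small stage-oriented lifecycle, and it silently no-ops unless the scheduled-query properties (Constants.SCHEDULED_QUERY_SCHEDULENAME and a positive SCHEDULED_QUERY_EXECUTIONID) are set on the session conf. A sketch of the call sequence the call sites in this patch follow, assuming a ReplicationMetricCollector named collector is in scope; the counts and last repl id are illustrative:

```java
// Sketch only: the stage lifecycle used by REPL LOAD in this patch.
Map<String, Long> expected = new HashMap<>();
expected.put(ReplUtils.MetricName.TABLES.name(), 10L);
collector.reportStageStart("REPL_LOAD", expected);           // stage created, IN_PROGRESS
collector.reportStageProgress("REPL_LOAD",
    ReplUtils.MetricName.TABLES.name(), 1);                  // one table replicated
collector.reportStageEnd("REPL_LOAD", Status.SUCCESS, 42L);  // 42L: illustrative last repl id
collector.reportEnd(Status.SUCCESS);                         // whole policy run finished
```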
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Metadata.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Metadata.java
new file mode 100644
index 0000000000..7688ed6deb
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Metadata.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.metric.event;
+
+/**
+ * Class for defining the metadata info for replication metrics.
+ */
+public class Metadata {
+  /**
+   * Type of replication.
+   */
+  public enum ReplicationType {
+    BOOTSTRAP,
+    INCREMENTAL
+  }
+  private String dbName;
+  private ReplicationType replicationType;
+  private String stagingDir;
+  private long lastReplId;
+
+  public Metadata(String dbName, ReplicationType replicationType, String stagingDir) {
+    this.dbName = dbName;
+    this.replicationType = replicationType;
+    this.stagingDir = stagingDir;
+  }
+
+  public long getLastReplId() {
+    return lastReplId;
+  }
+
+  public String getDbName() {
+    return dbName;
+  }
+
+  public ReplicationType getReplicationType() {
+    return replicationType;
+  }
+
+  public String getStagingDir() {
+    return stagingDir;
+  }
+
+  public void setLastReplId(long lastReplId) {
+    this.lastReplId = lastReplId;
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Metric.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Metric.java
new file mode 100644
index 0000000000..fdd6fa403f
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Metric.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.metric.event;
+
+/**
+ * Class for defining the unit metric.
+ */
+public class Metric {
+  private String name;
+  private long currentCount;
+  private long totalCount;
+
+  public Metric(String name, long totalCount) {
+    this.name = name;
+    this.totalCount = totalCount;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public long getCurrentCount() {
+    return currentCount;
+  }
+
+  public void setCurrentCount(long currentCount) {
+    this.currentCount = currentCount;
+  }
+
+  public long getTotalCount() {
+    return totalCount;
+  }
+
+  public void setTotalCount(long totalCount) {
+    this.totalCount = totalCount;
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Progress.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Progress.java
new file mode 100644
index 0000000000..32018198f1
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Progress.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.metric.event;
+
+import java.util.Map;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Class for defining the progress info for replication metrics.
+ */
+public class Progress {
+
+  private Status status;
+
+  private Map<String, Stage> stages = new ConcurrentHashMap<>();
+
+  public Status getStatus() {
+    return status;
+  }
+
+  public void setStatus(Status status) {
+    this.status = status;
+  }
+
+  public void addStage(Stage stage) {
+    stages.put(stage.getName(), stage);
+  }
+
+  public Stage getStageByName(String stageName) {
+    return stages.get(stageName);
+  }
+
+  public List<Stage> getStages() {
+    return new ArrayList<>(stages.values());
+  }
+
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/ReplicationMetric.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/ReplicationMetric.java
new file mode 100644
index 0000000000..cc833613be
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/ReplicationMetric.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.metric.event; + +/** + * Class for defining the replication metrics. + */ +public class ReplicationMetric { + private long scheduledExecutionId; + private String policy; + private long dumpExecutionId; + private Metadata metadata; + private Progress progress; + + public ReplicationMetric(long scheduledExecutionId, String policy, long dumpExecutionId, Metadata metadata){ + this.scheduledExecutionId = scheduledExecutionId; + this.policy = policy; + this.dumpExecutionId = dumpExecutionId; + this.metadata = metadata; + this.progress = new Progress(); + } + + public long getScheduledExecutionId() { + return scheduledExecutionId; + } + + + public String getPolicy() { + return policy; + } + + public long getDumpExecutionId() { + return dumpExecutionId; + } + + public Progress getProgress() { + return progress; + } + + public void setMetadata(Metadata metadata) { + this.metadata = metadata; + } + + public Metadata getMetadata() { + return metadata; + } + + public void setProgress(Progress progress) { + this.progress = progress; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Stage.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Stage.java new file mode 100644 index 0000000000..5e87cc2f5b --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Stage.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.metric.event; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Class for defining the different stages of replication. 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Stage.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Stage.java
new file mode 100644
index 0000000000..5e87cc2f5b
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Stage.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.metric.event;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Class for defining the different stages of replication.
+ */
+public class Stage {
+  private String name;
+  private Status status;
+  private long startTime;
+  private long endTime;
+  private Map<String, Metric> metrics = new HashMap<>();
+
+  public Stage(String name, Status status, long startTime) {
+    this.name = name;
+    this.status = status;
+    this.startTime = startTime;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public Status getStatus() {
+    return status;
+  }
+
+  public void setStatus(Status status) {
+    this.status = status;
+  }
+
+  public long getStartTime() {
+    return startTime;
+  }
+
+  public void setStartTime(long startTime) {
+    this.startTime = startTime;
+  }
+
+  public long getEndTime() {
+    return endTime;
+  }
+
+  public void setEndTime(long endTime) {
+    this.endTime = endTime;
+  }
+
+  public void addMetric(Metric metric) {
+    this.metrics.put(metric.getName(), metric);
+  }
+
+  public Metric getMetricByName(String name) {
+    return this.metrics.get(name);
+  }
+
+  public List<Metric> getMetrics() {
+    return new ArrayList<>(metrics.values());
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Status.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Status.java
new file mode 100644
index 0000000000..96cf565f76
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Status.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.parse.repl.metric.event;
+
+/**
+ * Enum to define the status.
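Also worth noting for review: Stage keys its metrics by Metric.getName(), so adding a metric under an existing name replaces the earlier entry rather than accumulating a duplicate; Progress.addStage behaves the same way for stage names. For example:

Stage stage = new Stage("dump", Status.IN_PROGRESS, 0);
stage.addMetric(new Metric("TABLES", 10));
stage.addMetric(new Metric("TABLES", 12));   // same name: replaces the first entry
// stage.getMetrics().size() == 1, and the surviving total count is 12
assert stage.getMetricByName("TABLES").getTotalCount() == 12;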
+ */ +public enum Status { + SUCCESS, + FAILED, + IN_PROGRESS +} diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java index 9a20564de9..12e074107c 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.ql.exec.repl.ranger.RangerPolicy; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.repl.ReplState; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -63,11 +64,15 @@ @Mock private RangerDumpWork work; + @Mock + private ReplicationMetricCollector metricCollector; + @Before public void setup() throws Exception { task = new RangerDumpTask(mockClient, conf, work); Mockito.when(mockClient.removeMultiResourcePolicies(Mockito.anyList())).thenCallRealMethod(); Mockito.when(mockClient.checkConnection(Mockito.anyString())).thenReturn(true); + Mockito.when(work.getMetricCollector()).thenReturn(metricCollector); } @Test diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerLoadTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerLoadTask.java index af41e3d773..f3397702c8 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerLoadTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerLoadTask.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.ql.exec.repl.ranger.RangerPolicy; import org.apache.hadoop.hive.ql.exec.repl.ranger.RangerRestClientImpl; import org.apache.hadoop.hive.ql.parse.repl.ReplState; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -61,6 +62,9 @@ @Mock private RangerLoadWork work; + @Mock + private ReplicationMetricCollector metricCollector; + @Before public void setup() throws Exception { task = new RangerLoadTask(mockClient, conf, work); @@ -69,6 +73,7 @@ public void setup() throws Exception { Mockito.when(mockClient.addDenyPolicies(Mockito.anyList(), Mockito.anyString(), Mockito.anyString(), Mockito.anyString())).thenCallRealMethod(); Mockito.when(mockClient.checkConnection(Mockito.anyString())).thenReturn(true); + Mockito.when(work.getMetricCollector()).thenReturn(metricCollector); } @Test diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java new file mode 100644 index 0000000000..ad8c1a3579 --- /dev/null +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java @@ -0,0 +1,298 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
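The tests that follow exercise the collector lifecycle end to end. Condensed, the call sequence they rely on looks like the sketch below; ReplicationMetricCollector, the concrete collectors, and the MetricCollector singleton are added elsewhere in this patch, and the overload shapes shown match the calls in the tests rather than a documented API:

ReplicationMetricCollector collector = new BootstrapDumpMetricCollector("db", "staging", conf);

Map<String, Long> metricMap = new HashMap<>();
metricMap.put(ReplUtils.MetricName.TABLES.name(), 10L);
collector.reportStageStart("dump", metricMap);          // register the stage and its total counts
collector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1);  // advance the current count
collector.reportStageEnd("dump", Status.SUCCESS, 10);   // close the stage; 10 is the last repl id
collector.reportEnd(Status.SUCCESS);                    // mark the whole run finished

// The in-memory result is then visible through the singleton cache:
List<ReplicationMetric> metrics = MetricCollector.getInstance().getMetrics();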
You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.parse.repl.metric;
+
+import org.apache.hadoop.hive.conf.Constants;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.parse.repl.dump.metric.BootstrapDumpMetricCollector;
+import org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector;
+import org.apache.hadoop.hive.ql.parse.repl.load.metric.BootstrapLoadMetricCollector;
+import org.apache.hadoop.hive.ql.parse.repl.load.metric.IncrementalLoadMetricCollector;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.Progress;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metric;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import java.util.Map;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Arrays;
+
+/**
+ * Unit Test class for In Memory Replication Metric Collection.
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class TestReplicationMetricCollector {
+
+  HiveConf conf;
+
+  @Before
+  public void setup() throws Exception {
+    conf = new HiveConf();
+    conf.set(Constants.SCHEDULED_QUERY_SCHEDULENAME, "repl");
+    conf.set(Constants.SCHEDULED_QUERY_EXECUTIONID, "1");
+    MetricCollector.getInstance().init(conf);
+  }
+
+  @Test
+  public void testFailureCacheHardLimit() throws Exception {
+    ReplicationMetricCollector bootstrapDumpMetricCollector = new BootstrapDumpMetricCollector("db",
+        "staging", conf);
+    Map<String, Long> metricMap = new HashMap<>();
+    metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10);
+    metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1);
+    bootstrapDumpMetricCollector.reportStageStart("dump", metricMap);
+    bootstrapDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS);
+
+    ReplicationMetricCollector incrDumpMetricCollector = new BootstrapDumpMetricCollector("db",
+        "staging", conf);
+    metricMap = new HashMap<>();
+    metricMap.put(ReplUtils.MetricName.EVENTS.name(), (long) 10);
+    incrDumpMetricCollector.reportStageStart("dump", metricMap);
+    try {
+      incrDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS);
+      Assert.fail();
+    } catch (SemanticException e) {
+      Assert.assertEquals("Metrics are not getting collected. ", e.getMessage());
+    }
+  }
+
+  @Test
+  public void testFailureNoScheduledId() throws Exception {
+    ReplicationMetricCollector bootstrapDumpMetricCollector = new BootstrapDumpMetricCollector("db",
+        "staging", conf);
+    Map<String, Long> metricMap = new HashMap<>();
+    metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10);
+    metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1);
+    bootstrapDumpMetricCollector.reportStageStart("dump", metricMap);
+    bootstrapDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS);
+    Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size());
+  }
+
+  @Test
+  public void testFailureNoPolicyId() throws Exception {
+    ReplicationMetricCollector bootstrapDumpMetricCollector = new BootstrapDumpMetricCollector("db",
+        "staging", conf);
+    Map<String, Long> metricMap = new HashMap<>();
+    metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10);
+    metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1);
+    bootstrapDumpMetricCollector.reportStageStart("dump", metricMap);
+    bootstrapDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS);
+    Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size());
+  }
+
+  @Test
+  public void testSuccessBootstrapDumpMetrics() throws Exception {
+    ReplicationMetricCollector bootstrapDumpMetricCollector = new BootstrapDumpMetricCollector("db",
+        "staging", conf);
+    Map<String, Long> metricMap = new HashMap<>();
+    metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10);
+    metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1);
+    bootstrapDumpMetricCollector.reportStageStart("dump", metricMap);
+    bootstrapDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1);
+    List<ReplicationMetric> actualMetrics = MetricCollector.getInstance().getMetrics();
+    Assert.assertEquals(1, actualMetrics.size());
+
+    bootstrapDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 2);
+    bootstrapDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.FUNCTIONS.name(), 1);
+    actualMetrics = MetricCollector.getInstance().getMetrics();
+    Assert.assertEquals(1, actualMetrics.size());
+
+    bootstrapDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10);
+    bootstrapDumpMetricCollector.reportEnd(Status.SUCCESS);
+    actualMetrics = MetricCollector.getInstance().getMetrics();
+    Assert.assertEquals(1, actualMetrics.size());
+
+    Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.BOOTSTRAP, "staging");
+    expectedMetadata.setLastReplId(10);
+    Progress expectedProgress = new Progress();
+    expectedProgress.setStatus(Status.SUCCESS);
+    Stage dumpStage = new Stage("dump", Status.SUCCESS, 0);
+    dumpStage.setEndTime(0);
+    Metric expectedTableMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 10);
+    expectedTableMetric.setCurrentCount(3);
+    Metric expectedFuncMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 1);
+    expectedFuncMetric.setCurrentCount(1);
+    dumpStage.addMetric(expectedTableMetric);
+    dumpStage.addMetric(expectedFuncMetric);
+    expectedProgress.addStage(dumpStage);
+    ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 0, expectedMetadata);
+    expectedMetric.setProgress(expectedProgress);
+    checkSuccess(actualMetrics.get(0), expectedMetric, "dump",
+        Arrays.asList(ReplUtils.MetricName.TABLES.name(), ReplUtils.MetricName.FUNCTIONS.name()));
+  }
+
+  @Test
+  public void testSuccessIncrDumpMetrics() throws Exception {
+    ReplicationMetricCollector incrDumpMetricCollector = new IncrementalDumpMetricCollector("db",
+        "staging", conf);
+    Map<String, Long> metricMap = new HashMap<>();
+    metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10);
+    metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1);
+    incrDumpMetricCollector.reportStageStart("dump", metricMap);
+    incrDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1);
+    List<ReplicationMetric> actualMetrics = MetricCollector.getInstance().getMetrics();
+    Assert.assertEquals(1, actualMetrics.size());
+
+    incrDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 2);
+    incrDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.FUNCTIONS.name(), 1);
+    actualMetrics = MetricCollector.getInstance().getMetrics();
+    Assert.assertEquals(1, actualMetrics.size());
+
+    incrDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10);
+    incrDumpMetricCollector.reportEnd(Status.SUCCESS);
+    actualMetrics = MetricCollector.getInstance().getMetrics();
+    Assert.assertEquals(1, actualMetrics.size());
+
+    Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.INCREMENTAL, "staging");
+    expectedMetadata.setLastReplId(10);
+    Progress expectedProgress = new Progress();
+    expectedProgress.setStatus(Status.SUCCESS);
+    Stage dumpStage = new Stage("dump", Status.SUCCESS, 0);
+    dumpStage.setEndTime(0);
+    Metric expectedTableMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 10);
+    expectedTableMetric.setCurrentCount(3);
+    Metric expectedFuncMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 1);
+    expectedFuncMetric.setCurrentCount(1);
+    dumpStage.addMetric(expectedTableMetric);
+    dumpStage.addMetric(expectedFuncMetric);
+    expectedProgress.addStage(dumpStage);
+    ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 0,
+        expectedMetadata);
+    expectedMetric.setProgress(expectedProgress);
+    checkSuccess(actualMetrics.get(0), expectedMetric, "dump",
+        Arrays.asList(ReplUtils.MetricName.TABLES.name(), ReplUtils.MetricName.FUNCTIONS.name()));
+  }
+
+  @Test
+  public void testSuccessBootstrapLoadMetrics() throws Exception {
+    ReplicationMetricCollector bootstrapLoadMetricCollector = new BootstrapLoadMetricCollector("db",
+        "staging", 1, conf);
+    Map<String, Long> metricMap = new HashMap<>();
+    metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10);
+    metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1);
+    bootstrapLoadMetricCollector.reportStageStart("dump", metricMap);
+    bootstrapLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1);
+    List<ReplicationMetric> actualMetrics = MetricCollector.getInstance().getMetrics();
+    Assert.assertEquals(1, actualMetrics.size());
+
+    bootstrapLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 2);
+    bootstrapLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.FUNCTIONS.name(), 1);
+    actualMetrics = MetricCollector.getInstance().getMetrics();
+    Assert.assertEquals(1, actualMetrics.size());
+
+    bootstrapLoadMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10);
+    bootstrapLoadMetricCollector.reportEnd(Status.SUCCESS);
+    actualMetrics = MetricCollector.getInstance().getMetrics();
+    Assert.assertEquals(1, actualMetrics.size());
+
+    Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.BOOTSTRAP, "staging");
+    expectedMetadata.setLastReplId(10);
+    Progress expectedProgress = new Progress();
+    expectedProgress.setStatus(Status.SUCCESS);
+    Stage dumpStage = new Stage("dump", Status.SUCCESS, 0);
+    dumpStage.setEndTime(0);
+    Metric expectedTableMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 10);
+    expectedTableMetric.setCurrentCount(3);
+    Metric expectedFuncMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 1);
+    expectedFuncMetric.setCurrentCount(1);
+    dumpStage.addMetric(expectedTableMetric);
+    dumpStage.addMetric(expectedFuncMetric);
+    expectedProgress.addStage(dumpStage);
+    ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 1,
+        expectedMetadata);
+    expectedMetric.setProgress(expectedProgress);
+    checkSuccess(actualMetrics.get(0), expectedMetric, "dump",
+        Arrays.asList(ReplUtils.MetricName.TABLES.name(), ReplUtils.MetricName.FUNCTIONS.name()));
+  }
+
+  @Test
+  public void testSuccessIncrLoadMetrics() throws Exception {
+    ReplicationMetricCollector incrLoadMetricCollector = new IncrementalLoadMetricCollector("db",
+        "staging", 1, conf);
+    Map<String, Long> metricMap = new HashMap<>();
+    metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10);
+    metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1);
+    incrLoadMetricCollector.reportStageStart("dump", metricMap);
+    incrLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1);
+    List<ReplicationMetric> actualMetrics = MetricCollector.getInstance().getMetrics();
+    Assert.assertEquals(1, actualMetrics.size());
+
+    incrLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 2);
+    incrLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.FUNCTIONS.name(), 1);
+    actualMetrics = MetricCollector.getInstance().getMetrics();
+    Assert.assertEquals(1, actualMetrics.size());
+
+    incrLoadMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10);
+    incrLoadMetricCollector.reportEnd(Status.SUCCESS);
+    actualMetrics = MetricCollector.getInstance().getMetrics();
+    Assert.assertEquals(1, actualMetrics.size());
+
+    Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.INCREMENTAL, "staging");
+    expectedMetadata.setLastReplId(10);
+    Progress expectedProgress = new Progress();
+    expectedProgress.setStatus(Status.SUCCESS);
+    Stage dumpStage = new Stage("dump", Status.SUCCESS, 0);
+    dumpStage.setEndTime(0);
+    Metric expectedTableMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 10);
+    expectedTableMetric.setCurrentCount(3);
+    Metric expectedFuncMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 1);
+    expectedFuncMetric.setCurrentCount(1);
+    dumpStage.addMetric(expectedTableMetric);
+    dumpStage.addMetric(expectedFuncMetric);
+    expectedProgress.addStage(dumpStage);
+    ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 1,
+        expectedMetadata);
+    expectedMetric.setProgress(expectedProgress);
+    checkSuccess(actualMetrics.get(0), expectedMetric, "dump",
+        Arrays.asList(ReplUtils.MetricName.TABLES.name(), ReplUtils.MetricName.FUNCTIONS.name()));
+  }
+
+  private void checkSuccess(ReplicationMetric actual, ReplicationMetric expected, String stageName,
+      List<String> metricNames) {
+    Assert.assertEquals(expected.getDumpExecutionId(), actual.getDumpExecutionId());
+    Assert.assertEquals(expected.getPolicy(), actual.getPolicy());
+    Assert.assertEquals(expected.getScheduledExecutionId(), actual.getScheduledExecutionId());
+    Assert.assertEquals(expected.getMetadata().getReplicationType(), actual.getMetadata().getReplicationType());
+    Assert.assertEquals(expected.getMetadata().getDbName(), actual.getMetadata().getDbName());
+    Assert.assertEquals(expected.getMetadata().getStagingDir(), actual.getMetadata().getStagingDir());
+    Assert.assertEquals(expected.getMetadata().getLastReplId(),
actual.getMetadata().getLastReplId()); + Assert.assertEquals(expected.getProgress().getStatus(), actual.getProgress().getStatus()); + Assert.assertEquals(expected.getProgress().getStageByName(stageName).getStatus(), + actual.getProgress().getStageByName(stageName).getStatus()); + for (String metricName : metricNames) { + Assert.assertEquals(expected.getProgress().getStageByName(stageName).getMetricByName(metricName).getTotalCount(), + actual.getProgress().getStageByName(stageName).getMetricByName(metricName).getTotalCount()); + Assert.assertEquals(expected.getProgress().getStageByName(stageName).getMetricByName(metricName) + .getCurrentCount(), actual.getProgress() + .getStageByName(stageName).getMetricByName(metricName).getCurrentCount()); + } + } + +} diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java index 043ec8053c..f387c91fca 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java @@ -877,14 +877,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsRequ case 4: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1144 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list1144.size); - Partition _elem1145; - for (int _i1146 = 0; _i1146 < _list1144.size; ++_i1146) + org.apache.thrift.protocol.TList _list1152 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list1152.size); + Partition _elem1153; + for (int _i1154 = 0; _i1154 < _list1152.size; ++_i1154) { - _elem1145 = new Partition(); - _elem1145.read(iprot); - struct.partitions.add(_elem1145); + _elem1153 = new Partition(); + _elem1153.read(iprot); + struct.partitions.add(_elem1153); } iprot.readListEnd(); } @@ -952,9 +952,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsReq oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter1147 : struct.partitions) + for (Partition _iter1155 : struct.partitions) { - _iter1147.write(oprot); + _iter1155.write(oprot); } oprot.writeListEnd(); } @@ -1000,9 +1000,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequ oprot.writeString(struct.tableName); { oprot.writeI32(struct.partitions.size()); - for (Partition _iter1148 : struct.partitions) + for (Partition _iter1156 : struct.partitions) { - _iter1148.write(oprot); + _iter1156.write(oprot); } } BitSet optionals = new BitSet(); @@ -1041,14 +1041,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsReque struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); { - org.apache.thrift.protocol.TList _list1149 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list1149.size); - Partition _elem1150; - for (int _i1151 = 0; _i1151 < _list1149.size; ++_i1151) + org.apache.thrift.protocol.TList _list1157 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list1157.size); + Partition _elem1158; + for (int _i1159 = 0; _i1159 < _list1157.size; ++_i1159) { - _elem1150 = new Partition(); - _elem1150.read(iprot); - struct.partitions.add(_elem1150); + _elem1158 = new Partition(); + _elem1158.read(iprot); + struct.partitions.add(_elem1158); } } struct.setPartitionsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterTableRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterTableRequest.java index fb64c999c2..0fe9ae0ca6 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterTableRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterTableRequest.java @@ -1073,13 +1073,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AlterTableRequest s case 8: // PROCESSOR_CAPABILITIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1160 = iprot.readListBegin(); - struct.processorCapabilities = new ArrayList(_list1160.size); - String _elem1161; - for (int _i1162 = 0; _i1162 < _list1160.size; ++_i1162) + org.apache.thrift.protocol.TList _list1168 = iprot.readListBegin(); + struct.processorCapabilities = new ArrayList(_list1168.size); + String _elem1169; + for (int _i1170 = 0; _i1170 < _list1168.size; ++_i1170) { - _elem1161 = iprot.readString(); - struct.processorCapabilities.add(_elem1161); + _elem1169 = iprot.readString(); + struct.processorCapabilities.add(_elem1169); } iprot.readListEnd(); } @@ -1155,9 +1155,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AlterTableRequest oprot.writeFieldBegin(PROCESSOR_CAPABILITIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.processorCapabilities.size())); - for (String _iter1163 : struct.processorCapabilities) + for (String _iter1171 : struct.processorCapabilities) { - oprot.writeString(_iter1163); + oprot.writeString(_iter1171); } oprot.writeListEnd(); } @@ -1226,9 +1226,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AlterTableRequest s if (struct.isSetProcessorCapabilities()) { { oprot.writeI32(struct.processorCapabilities.size()); - for (String _iter1164 : struct.processorCapabilities) + for (String _iter1172 : struct.processorCapabilities) { - oprot.writeString(_iter1164); + oprot.writeString(_iter1172); } } } @@ -1267,13 +1267,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AlterTableRequest st } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1165 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.processorCapabilities = new ArrayList(_list1165.size); - String _elem1166; - for (int _i1167 = 0; _i1167 < _list1165.size; ++_i1167) + org.apache.thrift.protocol.TList _list1173 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.processorCapabilities = new ArrayList(_list1173.size); + String _elem1174; + for (int _i1175 = 0; _i1175 < _list1173.size; ++_i1175) { - _elem1166 = iprot.readString(); - struct.processorCapabilities.add(_elem1166); + _elem1174 = iprot.readString(); + 
struct.processorCapabilities.add(_elem1174); } } struct.setProcessorCapabilitiesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java index 061247f01a..d963eb67cc 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java @@ -444,13 +444,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsFilter case 8: // FILTERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1176 = iprot.readListBegin(); - struct.filters = new ArrayList(_list1176.size); - String _elem1177; - for (int _i1178 = 0; _i1178 < _list1176.size; ++_i1178) + org.apache.thrift.protocol.TList _list1184 = iprot.readListBegin(); + struct.filters = new ArrayList(_list1184.size); + String _elem1185; + for (int _i1186 = 0; _i1186 < _list1184.size; ++_i1186) { - _elem1177 = iprot.readString(); - struct.filters.add(_elem1177); + _elem1185 = iprot.readString(); + struct.filters.add(_elem1185); } iprot.readListEnd(); } @@ -484,9 +484,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsFilte oprot.writeFieldBegin(FILTERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filters.size())); - for (String _iter1179 : struct.filters) + for (String _iter1187 : struct.filters) { - oprot.writeString(_iter1179); + oprot.writeString(_iter1187); } oprot.writeListEnd(); } @@ -524,9 +524,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsFilter if (struct.isSetFilters()) { { oprot.writeI32(struct.filters.size()); - for (String _iter1180 : struct.filters) + for (String _iter1188 : struct.filters) { - oprot.writeString(_iter1180); + oprot.writeString(_iter1188); } } } @@ -542,13 +542,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsFilterS } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1181 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filters = new ArrayList(_list1181.size); - String _elem1182; - for (int _i1183 = 0; _i1183 < _list1181.size; ++_i1183) + org.apache.thrift.protocol.TList _list1189 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filters = new ArrayList(_list1189.size); + String _elem1190; + for (int _i1191 = 0; _i1191 < _list1189.size; ++_i1191) { - _elem1182 = iprot.readString(); - struct.filters.add(_elem1182); + _elem1190 = iprot.readString(); + struct.filters.add(_elem1190); } } struct.setFiltersIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java index cf2f1c290c..faa4ce651f 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java +++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java @@ -509,13 +509,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsProjec case 1: // FIELD_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1168 = iprot.readListBegin(); - struct.fieldList = new ArrayList(_list1168.size); - String _elem1169; - for (int _i1170 = 0; _i1170 < _list1168.size; ++_i1170) + org.apache.thrift.protocol.TList _list1176 = iprot.readListBegin(); + struct.fieldList = new ArrayList(_list1176.size); + String _elem1177; + for (int _i1178 = 0; _i1178 < _list1176.size; ++_i1178) { - _elem1169 = iprot.readString(); - struct.fieldList.add(_elem1169); + _elem1177 = iprot.readString(); + struct.fieldList.add(_elem1177); } iprot.readListEnd(); } @@ -557,9 +557,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsProje oprot.writeFieldBegin(FIELD_LIST_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.fieldList.size())); - for (String _iter1171 : struct.fieldList) + for (String _iter1179 : struct.fieldList) { - oprot.writeString(_iter1171); + oprot.writeString(_iter1179); } oprot.writeListEnd(); } @@ -606,9 +606,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsProjec if (struct.isSetFieldList()) { { oprot.writeI32(struct.fieldList.size()); - for (String _iter1172 : struct.fieldList) + for (String _iter1180 : struct.fieldList) { - oprot.writeString(_iter1172); + oprot.writeString(_iter1180); } } } @@ -626,13 +626,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsProject BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1173 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.fieldList = new ArrayList(_list1173.size); - String _elem1174; - for (int _i1175 = 0; _i1175 < _list1173.size; ++_i1175) + org.apache.thrift.protocol.TList _list1181 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.fieldList = new ArrayList(_list1181.size); + String _elem1182; + for (int _i1183 = 0; _i1183 < _list1181.size; ++_i1183) { - _elem1174 = iprot.readString(); - struct.fieldList.add(_elem1174); + _elem1182 = iprot.readString(); + struct.fieldList.add(_elem1182); } } struct.setFieldListIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java index c927bbca17..4aafa866e7 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java @@ -1139,13 +1139,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsReques case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1192 = iprot.readListBegin(); - struct.groupNames = new ArrayList(_list1192.size); - String _elem1193; - for (int _i1194 = 0; _i1194 < _list1192.size; ++_i1194) + 
org.apache.thrift.protocol.TList _list1200 = iprot.readListBegin(); + struct.groupNames = new ArrayList(_list1200.size); + String _elem1201; + for (int _i1202 = 0; _i1202 < _list1200.size; ++_i1202) { - _elem1193 = iprot.readString(); - struct.groupNames.add(_elem1193); + _elem1201 = iprot.readString(); + struct.groupNames.add(_elem1201); } iprot.readListEnd(); } @@ -1175,13 +1175,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsReques case 9: // PROCESSOR_CAPABILITIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1195 = iprot.readListBegin(); - struct.processorCapabilities = new ArrayList(_list1195.size); - String _elem1196; - for (int _i1197 = 0; _i1197 < _list1195.size; ++_i1197) + org.apache.thrift.protocol.TList _list1203 = iprot.readListBegin(); + struct.processorCapabilities = new ArrayList(_list1203.size); + String _elem1204; + for (int _i1205 = 0; _i1205 < _list1203.size; ++_i1205) { - _elem1196 = iprot.readString(); - struct.processorCapabilities.add(_elem1196); + _elem1204 = iprot.readString(); + struct.processorCapabilities.add(_elem1204); } iprot.readListEnd(); } @@ -1245,9 +1245,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsReque oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.groupNames.size())); - for (String _iter1198 : struct.groupNames) + for (String _iter1206 : struct.groupNames) { - oprot.writeString(_iter1198); + oprot.writeString(_iter1206); } oprot.writeListEnd(); } @@ -1269,9 +1269,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsReque oprot.writeFieldBegin(PROCESSOR_CAPABILITIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.processorCapabilities.size())); - for (String _iter1199 : struct.processorCapabilities) + for (String _iter1207 : struct.processorCapabilities) { - oprot.writeString(_iter1199); + oprot.writeString(_iter1207); } oprot.writeListEnd(); } @@ -1352,9 +1352,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsReques if (struct.isSetGroupNames()) { { oprot.writeI32(struct.groupNames.size()); - for (String _iter1200 : struct.groupNames) + for (String _iter1208 : struct.groupNames) { - oprot.writeString(_iter1200); + oprot.writeString(_iter1208); } } } @@ -1367,9 +1367,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsReques if (struct.isSetProcessorCapabilities()) { { oprot.writeI32(struct.processorCapabilities.size()); - for (String _iter1201 : struct.processorCapabilities) + for (String _iter1209 : struct.processorCapabilities) { - oprot.writeString(_iter1201); + oprot.writeString(_iter1209); } } } @@ -1404,13 +1404,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRequest } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1202 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.groupNames = new ArrayList(_list1202.size); - String _elem1203; - for (int _i1204 = 0; _i1204 < _list1202.size; ++_i1204) + org.apache.thrift.protocol.TList _list1210 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.groupNames = new ArrayList(_list1210.size); + String _elem1211; + for (int _i1212 = 0; _i1212 < _list1210.size; 
++_i1212) { - _elem1203 = iprot.readString(); - struct.groupNames.add(_elem1203); + _elem1211 = iprot.readString(); + struct.groupNames.add(_elem1211); } } struct.setGroupNamesIsSet(true); @@ -1427,13 +1427,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRequest } if (incoming.get(8)) { { - org.apache.thrift.protocol.TList _list1205 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.processorCapabilities = new ArrayList(_list1205.size); - String _elem1206; - for (int _i1207 = 0; _i1207 < _list1205.size; ++_i1207) + org.apache.thrift.protocol.TList _list1213 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.processorCapabilities = new ArrayList(_list1213.size); + String _elem1214; + for (int _i1215 = 0; _i1215 < _list1213.size; ++_i1215) { - _elem1206 = iprot.readString(); - struct.processorCapabilities.add(_elem1206); + _elem1214 = iprot.readString(); + struct.processorCapabilities.add(_elem1214); } } struct.setProcessorCapabilitiesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java index 342f05e5c3..a29f43ff01 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsRespon case 1: // PARTITION_SPEC if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1184 = iprot.readListBegin(); - struct.partitionSpec = new ArrayList(_list1184.size); - PartitionSpec _elem1185; - for (int _i1186 = 0; _i1186 < _list1184.size; ++_i1186) + org.apache.thrift.protocol.TList _list1192 = iprot.readListBegin(); + struct.partitionSpec = new ArrayList(_list1192.size); + PartitionSpec _elem1193; + for (int _i1194 = 0; _i1194 < _list1192.size; ++_i1194) { - _elem1185 = new PartitionSpec(); - _elem1185.read(iprot); - struct.partitionSpec.add(_elem1185); + _elem1193 = new PartitionSpec(); + _elem1193.read(iprot); + struct.partitionSpec.add(_elem1193); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsRespo oprot.writeFieldBegin(PARTITION_SPEC_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionSpec.size())); - for (PartitionSpec _iter1187 : struct.partitionSpec) + for (PartitionSpec _iter1195 : struct.partitionSpec) { - _iter1187.write(oprot); + _iter1195.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRespon if (struct.isSetPartitionSpec()) { { oprot.writeI32(struct.partitionSpec.size()); - for (PartitionSpec _iter1188 : struct.partitionSpec) + for (PartitionSpec _iter1196 : struct.partitionSpec) { - _iter1188.write(oprot); + _iter1196.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRespons BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - 
org.apache.thrift.protocol.TList _list1189 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionSpec = new ArrayList(_list1189.size); - PartitionSpec _elem1190; - for (int _i1191 = 0; _i1191 < _list1189.size; ++_i1191) + org.apache.thrift.protocol.TList _list1197 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionSpec = new ArrayList(_list1197.size); + PartitionSpec _elem1198; + for (int _i1199 = 0; _i1199 < _list1197.size; ++_i1199) { - _elem1190 = new PartitionSpec(); - _elem1190.read(iprot); - struct.partitionSpec.add(_elem1190); + _elem1198 = new PartitionSpec(); + _elem1198.read(iprot); + struct.partitionSpec.add(_elem1198); } } struct.setPartitionSpecIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java index e54191ae1c..b8bbb9839f 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java @@ -796,13 +796,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, RenamePartitionRequ case 4: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1152 = iprot.readListBegin(); - struct.partVals = new ArrayList(_list1152.size); - String _elem1153; - for (int _i1154 = 0; _i1154 < _list1152.size; ++_i1154) + org.apache.thrift.protocol.TList _list1160 = iprot.readListBegin(); + struct.partVals = new ArrayList(_list1160.size); + String _elem1161; + for (int _i1162 = 0; _i1162 < _list1160.size; ++_i1162) { - _elem1153 = iprot.readString(); - struct.partVals.add(_elem1153); + _elem1161 = iprot.readString(); + struct.partVals.add(_elem1161); } iprot.readListEnd(); } @@ -862,9 +862,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, RenamePartitionReq oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partVals.size())); - for (String _iter1155 : struct.partVals) + for (String _iter1163 : struct.partVals) { - oprot.writeString(_iter1155); + oprot.writeString(_iter1163); } oprot.writeListEnd(); } @@ -903,9 +903,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, RenamePartitionRequ oprot.writeString(struct.tableName); { oprot.writeI32(struct.partVals.size()); - for (String _iter1156 : struct.partVals) + for (String _iter1164 : struct.partVals) { - oprot.writeString(_iter1156); + oprot.writeString(_iter1164); } } struct.newPart.write(oprot); @@ -933,13 +933,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, RenamePartitionReque struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); { - org.apache.thrift.protocol.TList _list1157 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partVals = new ArrayList(_list1157.size); - String _elem1158; - for (int _i1159 = 0; _i1159 < _list1157.size; ++_i1159) + org.apache.thrift.protocol.TList _list1165 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + 
struct.partVals = new ArrayList(_list1165.size); + String _elem1166; + for (int _i1167 = 0; _i1167 < _list1165.size; ++_i1167) { - _elem1158 = iprot.readString(); - struct.partVals.add(_elem1158); + _elem1166 = iprot.readString(); + struct.partVals.add(_elem1166); } } struct.setPartValsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetricList.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetricList.java new file mode 100644 index 0000000000..043cf1df8c --- /dev/null +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetricList.java @@ -0,0 +1,443 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ReplicationMetricList implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ReplicationMetricList"); + + private static final org.apache.thrift.protocol.TField REPLICATION_METRIC_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("replicationMetricList", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new ReplicationMetricListStandardSchemeFactory()); + schemes.put(TupleScheme.class, new ReplicationMetricListTupleSchemeFactory()); + } + + private List replicationMetricList; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REPLICATION_METRIC_LIST((short)1, "replicationMetricList"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REPLICATION_METRIC_LIST + return REPLICATION_METRIC_LIST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REPLICATION_METRIC_LIST, new org.apache.thrift.meta_data.FieldMetaData("replicationMetricList", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ReplicationMetrics.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ReplicationMetricList.class, metaDataMap); + } + + public ReplicationMetricList() { + } + + public ReplicationMetricList( + List replicationMetricList) + { + this(); + this.replicationMetricList = replicationMetricList; + } + + /** + * Performs a deep copy on other. + */ + public ReplicationMetricList(ReplicationMetricList other) { + if (other.isSetReplicationMetricList()) { + List __this__replicationMetricList = new ArrayList(other.replicationMetricList.size()); + for (ReplicationMetrics other_element : other.replicationMetricList) { + __this__replicationMetricList.add(new ReplicationMetrics(other_element)); + } + this.replicationMetricList = __this__replicationMetricList; + } + } + + public ReplicationMetricList deepCopy() { + return new ReplicationMetricList(this); + } + + @Override + public void clear() { + this.replicationMetricList = null; + } + + public int getReplicationMetricListSize() { + return (this.replicationMetricList == null) ? 0 : this.replicationMetricList.size(); + } + + public java.util.Iterator getReplicationMetricListIterator() { + return (this.replicationMetricList == null) ? 
null : this.replicationMetricList.iterator(); + } + + public void addToReplicationMetricList(ReplicationMetrics elem) { + if (this.replicationMetricList == null) { + this.replicationMetricList = new ArrayList(); + } + this.replicationMetricList.add(elem); + } + + public List getReplicationMetricList() { + return this.replicationMetricList; + } + + public void setReplicationMetricList(List replicationMetricList) { + this.replicationMetricList = replicationMetricList; + } + + public void unsetReplicationMetricList() { + this.replicationMetricList = null; + } + + /** Returns true if field replicationMetricList is set (has been assigned a value) and false otherwise */ + public boolean isSetReplicationMetricList() { + return this.replicationMetricList != null; + } + + public void setReplicationMetricListIsSet(boolean value) { + if (!value) { + this.replicationMetricList = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REPLICATION_METRIC_LIST: + if (value == null) { + unsetReplicationMetricList(); + } else { + setReplicationMetricList((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REPLICATION_METRIC_LIST: + return getReplicationMetricList(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REPLICATION_METRIC_LIST: + return isSetReplicationMetricList(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof ReplicationMetricList) + return this.equals((ReplicationMetricList)that); + return false; + } + + public boolean equals(ReplicationMetricList that) { + if (that == null) + return false; + + boolean this_present_replicationMetricList = true && this.isSetReplicationMetricList(); + boolean that_present_replicationMetricList = true && that.isSetReplicationMetricList(); + if (this_present_replicationMetricList || that_present_replicationMetricList) { + if (!(this_present_replicationMetricList && that_present_replicationMetricList)) + return false; + if (!this.replicationMetricList.equals(that.replicationMetricList)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_replicationMetricList = true && (isSetReplicationMetricList()); + list.add(present_replicationMetricList); + if (present_replicationMetricList) + list.add(replicationMetricList); + + return list.hashCode(); + } + + @Override + public int compareTo(ReplicationMetricList other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetReplicationMetricList()).compareTo(other.isSetReplicationMetricList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReplicationMetricList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.replicationMetricList, other.replicationMetricList); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws 
org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("ReplicationMetricList("); + boolean first = true; + + sb.append("replicationMetricList:"); + if (this.replicationMetricList == null) { + sb.append("null"); + } else { + sb.append(this.replicationMetricList); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetReplicationMetricList()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'replicationMetricList' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class ReplicationMetricListStandardSchemeFactory implements SchemeFactory { + public ReplicationMetricListStandardScheme getScheme() { + return new ReplicationMetricListStandardScheme(); + } + } + + private static class ReplicationMetricListStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, ReplicationMetricList struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REPLICATION_METRIC_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1144 = iprot.readListBegin(); + struct.replicationMetricList = new ArrayList(_list1144.size); + ReplicationMetrics _elem1145; + for (int _i1146 = 0; _i1146 < _list1144.size; ++_i1146) + { + _elem1145 = new ReplicationMetrics(); + _elem1145.read(iprot); + struct.replicationMetricList.add(_elem1145); + } + iprot.readListEnd(); + } + struct.setReplicationMetricListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, ReplicationMetricList struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.replicationMetricList != null) { + oprot.writeFieldBegin(REPLICATION_METRIC_LIST_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.replicationMetricList.size())); + for (ReplicationMetrics _iter1147 : struct.replicationMetricList) + { 
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ReplicationMetricList struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.replicationMetricList != null) {
+        oprot.writeFieldBegin(REPLICATION_METRIC_LIST_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.replicationMetricList.size()));
+          for (ReplicationMetrics _iter1147 : struct.replicationMetricList)
+          {
+            _iter1147.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ReplicationMetricListTupleSchemeFactory implements SchemeFactory {
+    public ReplicationMetricListTupleScheme getScheme() {
+      return new ReplicationMetricListTupleScheme();
+    }
+  }
+
+  private static class ReplicationMetricListTupleScheme extends TupleScheme<ReplicationMetricList> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ReplicationMetricList struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.replicationMetricList.size());
+        for (ReplicationMetrics _iter1148 : struct.replicationMetricList)
+        {
+          _iter1148.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ReplicationMetricList struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list1149 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.replicationMetricList = new ArrayList<ReplicationMetrics>(_list1149.size);
+        ReplicationMetrics _elem1150;
+        for (int _i1151 = 0; _i1151 < _list1149.size; ++_i1151)
+        {
+          _elem1150 = new ReplicationMetrics();
+          _elem1150.read(iprot);
+          struct.replicationMetricList.add(_elem1150);
+        }
+      }
+      struct.setReplicationMetricListIsSet(true);
+    }
+  }
+
+}
+
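Before the struct that the wrapper carries, a quick usage sketch. replicationMetricList is the wrapper's only field and it is required, so validate(), which the standard scheme invokes on every write, rejects an empty wrapper. The setter used below is the usual generated one; it is not shown in this excerpt and is assumed to follow the standard Thrift shape:

    // Minimal sketch: wrap per-policy metrics for the new add_replication_metrics call.
    ReplicationMetrics m = new ReplicationMetrics(1L, "repl_policy1", 100L); // required fields only
    ReplicationMetricList wrapper = new ReplicationMetricList();
    wrapper.setReplicationMetricList(java.util.Arrays.asList(m));
    wrapper.validate(); // would throw TProtocolException if replicationMetricList were unset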
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetrics.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetrics.java
new file mode 100644
index 0000000000..5ee5cac7ca
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetrics.java
@@ -0,0 +1,799 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ReplicationMetrics implements org.apache.thrift.TBase<ReplicationMetrics, ReplicationMetrics._Fields>, java.io.Serializable, Cloneable, Comparable<ReplicationMetrics> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ReplicationMetrics");
+
+  private static final org.apache.thrift.protocol.TField SCHEDULED_EXECUTION_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("scheduledExecutionId", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField POLICY_FIELD_DESC = new org.apache.thrift.protocol.TField("policy", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField DUMP_EXECUTION_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("dumpExecutionId", org.apache.thrift.protocol.TType.I64, (short)3);
+  private static final org.apache.thrift.protocol.TField METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("metadata", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField PROGRESS_FIELD_DESC = new org.apache.thrift.protocol.TField("progress", org.apache.thrift.protocol.TType.STRING, (short)5);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ReplicationMetricsStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ReplicationMetricsTupleSchemeFactory());
+  }
+
+  private long scheduledExecutionId; // required
+  private String policy; // required
+  private long dumpExecutionId; // required
+  private String metadata; // optional
+  private String progress; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    SCHEDULED_EXECUTION_ID((short)1, "scheduledExecutionId"),
+    POLICY((short)2, "policy"),
+    DUMP_EXECUTION_ID((short)3, "dumpExecutionId"),
+    METADATA((short)4, "metadata"),
+    PROGRESS((short)5, "progress");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // SCHEDULED_EXECUTION_ID
+          return SCHEDULED_EXECUTION_ID;
+        case 2: // POLICY
+          return POLICY;
+        case 3: // DUMP_EXECUTION_ID
+          return DUMP_EXECUTION_ID;
+        case 4: // METADATA
+          return METADATA;
+        case 5: // PROGRESS
+          return PROGRESS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __SCHEDULEDEXECUTIONID_ISSET_ID = 0;
+  private static final int __DUMPEXECUTIONID_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.METADATA,_Fields.PROGRESS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.SCHEDULED_EXECUTION_ID, new org.apache.thrift.meta_data.FieldMetaData("scheduledExecutionId", org.apache.thrift.TFieldRequirementType.REQUIRED,
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.POLICY, new org.apache.thrift.meta_data.FieldMetaData("policy", org.apache.thrift.TFieldRequirementType.REQUIRED,
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DUMP_EXECUTION_ID, new org.apache.thrift.meta_data.FieldMetaData("dumpExecutionId", org.apache.thrift.TFieldRequirementType.REQUIRED,
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.METADATA, new org.apache.thrift.meta_data.FieldMetaData("metadata", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PROGRESS, new org.apache.thrift.meta_data.FieldMetaData("progress", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ReplicationMetrics.class, metaDataMap);
+  }
+
+  public ReplicationMetrics() {
+  }
+
+  public ReplicationMetrics(
+    long scheduledExecutionId,
+    String policy,
+    long dumpExecutionId)
+  {
+    this();
+    this.scheduledExecutionId = scheduledExecutionId;
+    setScheduledExecutionIdIsSet(true);
+    this.policy = policy;
+    this.dumpExecutionId = dumpExecutionId;
+    setDumpExecutionIdIsSet(true);
+  }
+
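The two-constructor split above is the usual Thrift pattern: required fields travel through the convenience constructor, optional metadata and progress are set afterwards, and the primitive i64 fields track presence in __isset_bitfield rather than via null. A short sketch with hypothetical values:

    ReplicationMetrics metrics = new ReplicationMetrics(42L, "repl_policy1", 7L); // required fields
    metrics.setMetadata("{\"dbName\":\"sales\"}"); // optional: serialized only when set
    metrics.setProgress("IN_PROGRESS");            // optional
    boolean haveId = metrics.isSetScheduledExecutionId(); // true: bit set by the constructor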
+ */ + public ReplicationMetrics(ReplicationMetrics other) { + __isset_bitfield = other.__isset_bitfield; + this.scheduledExecutionId = other.scheduledExecutionId; + if (other.isSetPolicy()) { + this.policy = other.policy; + } + this.dumpExecutionId = other.dumpExecutionId; + if (other.isSetMetadata()) { + this.metadata = other.metadata; + } + if (other.isSetProgress()) { + this.progress = other.progress; + } + } + + public ReplicationMetrics deepCopy() { + return new ReplicationMetrics(this); + } + + @Override + public void clear() { + setScheduledExecutionIdIsSet(false); + this.scheduledExecutionId = 0; + this.policy = null; + setDumpExecutionIdIsSet(false); + this.dumpExecutionId = 0; + this.metadata = null; + this.progress = null; + } + + public long getScheduledExecutionId() { + return this.scheduledExecutionId; + } + + public void setScheduledExecutionId(long scheduledExecutionId) { + this.scheduledExecutionId = scheduledExecutionId; + setScheduledExecutionIdIsSet(true); + } + + public void unsetScheduledExecutionId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SCHEDULEDEXECUTIONID_ISSET_ID); + } + + /** Returns true if field scheduledExecutionId is set (has been assigned a value) and false otherwise */ + public boolean isSetScheduledExecutionId() { + return EncodingUtils.testBit(__isset_bitfield, __SCHEDULEDEXECUTIONID_ISSET_ID); + } + + public void setScheduledExecutionIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SCHEDULEDEXECUTIONID_ISSET_ID, value); + } + + public String getPolicy() { + return this.policy; + } + + public void setPolicy(String policy) { + this.policy = policy; + } + + public void unsetPolicy() { + this.policy = null; + } + + /** Returns true if field policy is set (has been assigned a value) and false otherwise */ + public boolean isSetPolicy() { + return this.policy != null; + } + + public void setPolicyIsSet(boolean value) { + if (!value) { + this.policy = null; + } + } + + public long getDumpExecutionId() { + return this.dumpExecutionId; + } + + public void setDumpExecutionId(long dumpExecutionId) { + this.dumpExecutionId = dumpExecutionId; + setDumpExecutionIdIsSet(true); + } + + public void unsetDumpExecutionId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DUMPEXECUTIONID_ISSET_ID); + } + + /** Returns true if field dumpExecutionId is set (has been assigned a value) and false otherwise */ + public boolean isSetDumpExecutionId() { + return EncodingUtils.testBit(__isset_bitfield, __DUMPEXECUTIONID_ISSET_ID); + } + + public void setDumpExecutionIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DUMPEXECUTIONID_ISSET_ID, value); + } + + public String getMetadata() { + return this.metadata; + } + + public void setMetadata(String metadata) { + this.metadata = metadata; + } + + public void unsetMetadata() { + this.metadata = null; + } + + /** Returns true if field metadata is set (has been assigned a value) and false otherwise */ + public boolean isSetMetadata() { + return this.metadata != null; + } + + public void setMetadataIsSet(boolean value) { + if (!value) { + this.metadata = null; + } + } + + public String getProgress() { + return this.progress; + } + + public void setProgress(String progress) { + this.progress = progress; + } + + public void unsetProgress() { + this.progress = null; + } + + /** Returns true if field progress is set (has been assigned a value) and false otherwise */ + public boolean isSetProgress() { + return this.progress 
!= null; + } + + public void setProgressIsSet(boolean value) { + if (!value) { + this.progress = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SCHEDULED_EXECUTION_ID: + if (value == null) { + unsetScheduledExecutionId(); + } else { + setScheduledExecutionId((Long)value); + } + break; + + case POLICY: + if (value == null) { + unsetPolicy(); + } else { + setPolicy((String)value); + } + break; + + case DUMP_EXECUTION_ID: + if (value == null) { + unsetDumpExecutionId(); + } else { + setDumpExecutionId((Long)value); + } + break; + + case METADATA: + if (value == null) { + unsetMetadata(); + } else { + setMetadata((String)value); + } + break; + + case PROGRESS: + if (value == null) { + unsetProgress(); + } else { + setProgress((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SCHEDULED_EXECUTION_ID: + return getScheduledExecutionId(); + + case POLICY: + return getPolicy(); + + case DUMP_EXECUTION_ID: + return getDumpExecutionId(); + + case METADATA: + return getMetadata(); + + case PROGRESS: + return getProgress(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SCHEDULED_EXECUTION_ID: + return isSetScheduledExecutionId(); + case POLICY: + return isSetPolicy(); + case DUMP_EXECUTION_ID: + return isSetDumpExecutionId(); + case METADATA: + return isSetMetadata(); + case PROGRESS: + return isSetProgress(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof ReplicationMetrics) + return this.equals((ReplicationMetrics)that); + return false; + } + + public boolean equals(ReplicationMetrics that) { + if (that == null) + return false; + + boolean this_present_scheduledExecutionId = true; + boolean that_present_scheduledExecutionId = true; + if (this_present_scheduledExecutionId || that_present_scheduledExecutionId) { + if (!(this_present_scheduledExecutionId && that_present_scheduledExecutionId)) + return false; + if (this.scheduledExecutionId != that.scheduledExecutionId) + return false; + } + + boolean this_present_policy = true && this.isSetPolicy(); + boolean that_present_policy = true && that.isSetPolicy(); + if (this_present_policy || that_present_policy) { + if (!(this_present_policy && that_present_policy)) + return false; + if (!this.policy.equals(that.policy)) + return false; + } + + boolean this_present_dumpExecutionId = true; + boolean that_present_dumpExecutionId = true; + if (this_present_dumpExecutionId || that_present_dumpExecutionId) { + if (!(this_present_dumpExecutionId && that_present_dumpExecutionId)) + return false; + if (this.dumpExecutionId != that.dumpExecutionId) + return false; + } + + boolean this_present_metadata = true && this.isSetMetadata(); + boolean that_present_metadata = true && that.isSetMetadata(); + if (this_present_metadata || that_present_metadata) { + if (!(this_present_metadata && that_present_metadata)) + return false; + if (!this.metadata.equals(that.metadata)) + return false; + } + + boolean this_present_progress = true && this.isSetProgress(); + boolean that_present_progress = true && that.isSetProgress(); + if (this_present_progress || that_present_progress) { + if (!(this_present_progress && 
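setFieldValue/getFieldValue give reflective, id-driven access on top of the typed setters, keyed by the _Fields enum, which is what generic metastore tooling uses. A sketch, with hypothetical values:

    ReplicationMetrics metrics = new ReplicationMetrics();
    metrics.setFieldValue(ReplicationMetrics._Fields.SCHEDULED_EXECUTION_ID, 42L);
    metrics.setFieldValue(ReplicationMetrics._Fields.POLICY, "repl_policy1");
    Object policy = metrics.getFieldValue(ReplicationMetrics._Fields.POLICY); // "repl_policy1"
    metrics.setFieldValue(ReplicationMetrics._Fields.POLICY, null); // null routes to unsetPolicy()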
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ReplicationMetrics)
+      return this.equals((ReplicationMetrics)that);
+    return false;
+  }
+
+  public boolean equals(ReplicationMetrics that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_scheduledExecutionId = true;
+    boolean that_present_scheduledExecutionId = true;
+    if (this_present_scheduledExecutionId || that_present_scheduledExecutionId) {
+      if (!(this_present_scheduledExecutionId && that_present_scheduledExecutionId))
+        return false;
+      if (this.scheduledExecutionId != that.scheduledExecutionId)
+        return false;
+    }
+
+    boolean this_present_policy = true && this.isSetPolicy();
+    boolean that_present_policy = true && that.isSetPolicy();
+    if (this_present_policy || that_present_policy) {
+      if (!(this_present_policy && that_present_policy))
+        return false;
+      if (!this.policy.equals(that.policy))
+        return false;
+    }
+
+    boolean this_present_dumpExecutionId = true;
+    boolean that_present_dumpExecutionId = true;
+    if (this_present_dumpExecutionId || that_present_dumpExecutionId) {
+      if (!(this_present_dumpExecutionId && that_present_dumpExecutionId))
+        return false;
+      if (this.dumpExecutionId != that.dumpExecutionId)
+        return false;
+    }
+
+    boolean this_present_metadata = true && this.isSetMetadata();
+    boolean that_present_metadata = true && that.isSetMetadata();
+    if (this_present_metadata || that_present_metadata) {
+      if (!(this_present_metadata && that_present_metadata))
+        return false;
+      if (!this.metadata.equals(that.metadata))
+        return false;
+    }
+
+    boolean this_present_progress = true && this.isSetProgress();
+    boolean that_present_progress = true && that.isSetProgress();
+    if (this_present_progress || that_present_progress) {
+      if (!(this_present_progress && that_present_progress))
+        return false;
+      if (!this.progress.equals(that.progress))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_scheduledExecutionId = true;
+    list.add(present_scheduledExecutionId);
+    if (present_scheduledExecutionId)
+      list.add(scheduledExecutionId);
+
+    boolean present_policy = true && (isSetPolicy());
+    list.add(present_policy);
+    if (present_policy)
+      list.add(policy);
+
+    boolean present_dumpExecutionId = true;
+    list.add(present_dumpExecutionId);
+    if (present_dumpExecutionId)
+      list.add(dumpExecutionId);
+
+    boolean present_metadata = true && (isSetMetadata());
+    list.add(present_metadata);
+    if (present_metadata)
+      list.add(metadata);
+
+    boolean present_progress = true && (isSetProgress());
+    list.add(present_progress);
+    if (present_progress)
+      list.add(progress);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ReplicationMetrics other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetScheduledExecutionId()).compareTo(other.isSetScheduledExecutionId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetScheduledExecutionId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.scheduledExecutionId, other.scheduledExecutionId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPolicy()).compareTo(other.isSetPolicy());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPolicy()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.policy, other.policy);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDumpExecutionId()).compareTo(other.isSetDumpExecutionId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDumpExecutionId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dumpExecutionId, other.dumpExecutionId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetMetadata()).compareTo(other.isSetMetadata());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMetadata()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.metadata, other.metadata);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetProgress()).compareTo(other.isSetProgress());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetProgress()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.progress, other.progress);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ReplicationMetrics(");
+    boolean first = true;
+
+    sb.append("scheduledExecutionId:");
+    sb.append(this.scheduledExecutionId);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("policy:");
+    if (this.policy == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.policy);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("dumpExecutionId:");
+    sb.append(this.dumpExecutionId);
+    first = false;
+    if (isSetMetadata()) {
+      if (!first) sb.append(", ");
+      sb.append("metadata:");
+      if (this.metadata == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.metadata);
+      }
+      first = false;
+    }
+    if (isSetProgress()) {
+      if (!first) sb.append(", ");
+      sb.append("progress:");
+      if (this.progress == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.progress);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetScheduledExecutionId()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'scheduledExecutionId' is unset! Struct:" + toString());
+    }
+
+    if (!isSetPolicy()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'policy' is unset! Struct:" + toString());
+    }
+
+    if (!isSetDumpExecutionId()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dumpExecutionId' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
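The writeObject/readObject pair routes plain Java serialization through TCompactProtocol over the object stream, so a serialized ReplicationMetrics is just a compact Thrift payload; readObject also re-zeroes __isset_bitfield because deserialization bypasses the constructor. A round-trip sketch (java.io imports assumed):

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    new ObjectOutputStream(bos).writeObject(metrics);        // delegates to write(TCompactProtocol)
    ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()));
    ReplicationMetrics copy = (ReplicationMetrics) in.readObject(); // delegates to read(...)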
+  private static class ReplicationMetricsStandardSchemeFactory implements SchemeFactory {
+    public ReplicationMetricsStandardScheme getScheme() {
+      return new ReplicationMetricsStandardScheme();
+    }
+  }
+
+  private static class ReplicationMetricsStandardScheme extends StandardScheme<ReplicationMetrics> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ReplicationMetrics struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // SCHEDULED_EXECUTION_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.scheduledExecutionId = iprot.readI64();
+              struct.setScheduledExecutionIdIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // POLICY
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.policy = iprot.readString();
+              struct.setPolicyIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // DUMP_EXECUTION_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.dumpExecutionId = iprot.readI64();
+              struct.setDumpExecutionIdIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // METADATA
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.metadata = iprot.readString();
+              struct.setMetadataIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // PROGRESS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.progress = iprot.readString();
+              struct.setProgressIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ReplicationMetrics struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(SCHEDULED_EXECUTION_ID_FIELD_DESC);
+      oprot.writeI64(struct.scheduledExecutionId);
+      oprot.writeFieldEnd();
+      if (struct.policy != null) {
+        oprot.writeFieldBegin(POLICY_FIELD_DESC);
+        oprot.writeString(struct.policy);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(DUMP_EXECUTION_ID_FIELD_DESC);
+      oprot.writeI64(struct.dumpExecutionId);
+      oprot.writeFieldEnd();
+      if (struct.metadata != null) {
+        if (struct.isSetMetadata()) {
+          oprot.writeFieldBegin(METADATA_FIELD_DESC);
+          oprot.writeString(struct.metadata);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.progress != null) {
+        if (struct.isSetProgress()) {
+          oprot.writeFieldBegin(PROGRESS_FIELD_DESC);
+          oprot.writeString(struct.progress);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
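As with every struct in this file, the top-level read/write pick an encoding through the schemes map keyed by the protocol's scheme class: binary and compact protocols report StandardScheme and get the tagged field encoding above, while TTupleProtocol gets the denser tuple encoding that follows. A sketch using libthrift's serializer utilities:

    // TSerializer defaults to TBinaryProtocol, which selects the StandardScheme path.
    org.apache.thrift.TSerializer ser = new org.apache.thrift.TSerializer();
    byte[] standardBytes = ser.serialize(metrics);
    // A tuple-protocol serializer exercises the TupleScheme path instead.
    org.apache.thrift.TSerializer tupleSer =
        new org.apache.thrift.TSerializer(new org.apache.thrift.protocol.TTupleProtocol.Factory());
    byte[] tupleBytes = tupleSer.serialize(metrics); // required fields plus a 2-bit optionals bitset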
+  private static class ReplicationMetricsTupleSchemeFactory implements SchemeFactory {
+    public ReplicationMetricsTupleScheme getScheme() {
+      return new ReplicationMetricsTupleScheme();
+    }
+  }
+
+  private static class ReplicationMetricsTupleScheme extends TupleScheme<ReplicationMetrics> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ReplicationMetrics struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.scheduledExecutionId);
+      oprot.writeString(struct.policy);
+      oprot.writeI64(struct.dumpExecutionId);
+      BitSet optionals = new BitSet();
+      if (struct.isSetMetadata()) {
+        optionals.set(0);
+      }
+      if (struct.isSetProgress()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetMetadata()) {
+        oprot.writeString(struct.metadata);
+      }
+      if (struct.isSetProgress()) {
+        oprot.writeString(struct.progress);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ReplicationMetrics struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.scheduledExecutionId = iprot.readI64();
+      struct.setScheduledExecutionIdIsSet(true);
+      struct.policy = iprot.readString();
+      struct.setPolicyIsSet(true);
+      struct.dumpExecutionId = iprot.readI64();
+      struct.setDumpExecutionIdIsSet(true);
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.metadata = iprot.readString();
+        struct.setMetadataIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.progress = iprot.readString();
+        struct.setProgressIsSet(true);
+      }
+    }
+  }
+
+}
+
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 5fd34d728b..8682e83f71 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -508,6 +508,8 @@
     public ScheduledQuery get_scheduled_query(ScheduledQueryKey scheduleKey) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
 
+    public void add_replication_metrics(ReplicationMetricList replicationMetricList) throws MetaException, org.apache.thrift.TException;
+
   }
 
   @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIface {
@@ -978,6 +980,8 @@
     public void get_scheduled_query(ScheduledQueryKey scheduleKey, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
+    public void add_replication_metrics(ReplicationMetricList replicationMetricList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
   }
 
   @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Client extends com.facebook.fb303.FacebookService.Client implements Iface {
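The hunks that follow wire the new call through the synchronous Client. End to end, a caller only sees the one blocking method; a minimal sketch against a plain, unsecured socket transport (the host and port below are assumed, not part of this patch):

    org.apache.thrift.transport.TTransport transport =
        new org.apache.thrift.transport.TSocket("metastore-host", 9083); // hypothetical endpoint
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new org.apache.thrift.protocol.TBinaryProtocol(transport));
    client.add_replication_metrics(wrapper); // throws MetaException on server-side failure
    transport.close();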
@@ -7648,6 +7652,29 @@ public ScheduledQuery recv_get_scheduled_query() throws MetaException, NoSuchObj
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_scheduled_query failed: unknown result");
     }
 
+    public void add_replication_metrics(ReplicationMetricList replicationMetricList) throws MetaException, org.apache.thrift.TException
+    {
+      send_add_replication_metrics(replicationMetricList);
+      recv_add_replication_metrics();
+    }
+
+    public void send_add_replication_metrics(ReplicationMetricList replicationMetricList) throws org.apache.thrift.TException
+    {
+      add_replication_metrics_args args = new add_replication_metrics_args();
+      args.setReplicationMetricList(replicationMetricList);
+      sendBase("add_replication_metrics", args);
+    }
+
+    public void recv_add_replication_metrics() throws MetaException, org.apache.thrift.TException
+    {
+      add_replication_metrics_result result = new add_replication_metrics_result();
+      receiveBase(result, "add_replication_metrics");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      return;
+    }
+
   }
 
   @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class AsyncClient extends com.facebook.fb303.FacebookService.AsyncClient implements AsyncIface {
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
@@ -15623,6 +15650,38 @@ public ScheduledQuery getResult() throws MetaException, NoSuchObjectException, o
       }
     }
 
+    public void add_replication_metrics(ReplicationMetricList replicationMetricList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      add_replication_metrics_call method_call = new add_replication_metrics_call(replicationMetricList, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_replication_metrics_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private ReplicationMetricList replicationMetricList;
+      public add_replication_metrics_call(ReplicationMetricList replicationMetricList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.replicationMetricList = replicationMetricList;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("add_replication_metrics", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        add_replication_metrics_args args = new add_replication_metrics_args();
+        args.setReplicationMetricList(replicationMetricList);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_add_replication_metrics();
+      }
+    }
+
   }
 
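The AsyncClient counterpart is callback-based: the call object writes add_replication_metrics_args, and getResult replays the framed response through a throwaway synchronous Client so that MetaException surfaces to the caller. A sketch of a caller, with the endpoint assumed and exception handling elided:

    org.apache.thrift.async.TAsyncClientManager manager =
        new org.apache.thrift.async.TAsyncClientManager();
    org.apache.thrift.transport.TNonblockingSocket socket =
        new org.apache.thrift.transport.TNonblockingSocket("metastore-host", 9083); // hypothetical
    ThriftHiveMetastore.AsyncClient asyncClient = new ThriftHiveMetastore.AsyncClient(
        new org.apache.thrift.protocol.TBinaryProtocol.Factory(), manager, socket);
    asyncClient.add_replication_metrics(wrapper,
        new org.apache.thrift.async.AsyncMethodCallback<ThriftHiveMetastore.AsyncClient.add_replication_metrics_call>() {
          public void onComplete(ThriftHiveMetastore.AsyncClient.add_replication_metrics_call response) {
            try {
              response.getResult(); // rethrows the server's MetaException, if any
            } catch (Exception e) { /* handle failure */ }
          }
          public void onError(Exception e) { /* transport-level failure */ }
        });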
  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Processor<I extends Iface> extends com.facebook.fb303.FacebookService.Processor<I> implements org.apache.thrift.TProcessor {
@@ -15869,6 +15928,7 @@ protected Processor(I iface, Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
+      processMap.put("add_replication_metrics", new add_replication_metrics());
       return processMap;
     }
 
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_replication_metrics<I extends Iface> extends org.apache.thrift.ProcessFunction<I, add_replication_metrics_args> {
+      public add_replication_metrics() {
+        super("add_replication_metrics");
+      }
+
+      public add_replication_metrics_args getEmptyArgsInstance() {
+        return new add_replication_metrics_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public add_replication_metrics_result getResult(I iface, add_replication_metrics_args args) throws org.apache.thrift.TException {
+        add_replication_metrics_result result = new add_replication_metrics_result();
+        try {
+          iface.add_replication_metrics(args.replicationMetricList);
+        } catch (MetaException o1) {
+          result.o1 = o1;
+        }
+        return result;
+      }
+    }
+
   }
 
   @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class AsyncProcessor<I extends AsyncIface> extends com.facebook.fb303.FacebookService.AsyncProcessor<I> {
@@ -22078,6 +22162,7 @@ protected AsyncProcessor(I iface, Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
+      processMap.put("add_replication_metrics", new add_replication_metrics());
       return processMap;
     }
 
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_replication_metrics<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_replication_metrics_args, Void> {
+      public add_replication_metrics() {
+        super("add_replication_metrics");
+      }
+
+      public add_replication_metrics_args getEmptyArgsInstance() {
+        return new add_replication_metrics_args();
+      }
+
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Void>() {
+          public void onComplete(Void o) {
+            add_replication_metrics_result result = new add_replication_metrics_result();
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            add_replication_metrics_result result = new add_replication_metrics_result();
+            if (e instanceof MetaException) {
+              result.o1 = (MetaException) e;
+              result.setO1IsSet(true);
+              msg = result;
+            }
+            else
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, add_replication_metrics_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.add_replication_metrics(args.replicationMetricList,resultHandler);
+      }
+    }
+
   }
 
   @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class getMetaConf_args implements org.apache.thrift.TBase<getMetaConf_args, getMetaConf_args._Fields>, java.io.Serializable, Cloneable, Comparable<getMetaConf_args>   {
@@ -47098,13 +47239,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1208 = iprot.readListBegin();
-                struct.success = new ArrayList(_list1208.size);
-                String _elem1209;
-                for (int _i1210 = 0; _i1210 < _list1208.size; ++_i1210)
+                org.apache.thrift.protocol.TList _list1216 = 
iprot.readListBegin(); + struct.success = new ArrayList(_list1216.size); + String _elem1217; + for (int _i1218 = 0; _i1218 < _list1216.size; ++_i1218) { - _elem1209 = iprot.readString(); - struct.success.add(_elem1209); + _elem1217 = iprot.readString(); + struct.success.add(_elem1217); } iprot.readListEnd(); } @@ -47139,9 +47280,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1211 : struct.success) + for (String _iter1219 : struct.success) { - oprot.writeString(_iter1211); + oprot.writeString(_iter1219); } oprot.writeListEnd(); } @@ -47180,9 +47321,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1212 : struct.success) + for (String _iter1220 : struct.success) { - oprot.writeString(_iter1212); + oprot.writeString(_iter1220); } } } @@ -47197,13 +47338,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1213 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1213.size); - String _elem1214; - for (int _i1215 = 0; _i1215 < _list1213.size; ++_i1215) + org.apache.thrift.protocol.TList _list1221 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1221.size); + String _elem1222; + for (int _i1223 = 0; _i1223 < _list1221.size; ++_i1223) { - _elem1214 = iprot.readString(); - struct.success.add(_elem1214); + _elem1222 = iprot.readString(); + struct.success.add(_elem1222); } } struct.setSuccessIsSet(true); @@ -47857,13 +47998,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1216 = iprot.readListBegin(); - struct.success = new ArrayList(_list1216.size); - String _elem1217; - for (int _i1218 = 0; _i1218 < _list1216.size; ++_i1218) + org.apache.thrift.protocol.TList _list1224 = iprot.readListBegin(); + struct.success = new ArrayList(_list1224.size); + String _elem1225; + for (int _i1226 = 0; _i1226 < _list1224.size; ++_i1226) { - _elem1217 = iprot.readString(); - struct.success.add(_elem1217); + _elem1225 = iprot.readString(); + struct.success.add(_elem1225); } iprot.readListEnd(); } @@ -47898,9 +48039,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1219 : struct.success) + for (String _iter1227 : struct.success) { - oprot.writeString(_iter1219); + oprot.writeString(_iter1227); } oprot.writeListEnd(); } @@ -47939,9 +48080,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1220 : struct.success) + for (String _iter1228 : struct.success) { - oprot.writeString(_iter1220); + oprot.writeString(_iter1228); } } } @@ -47956,13 +48097,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1221 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1221.size); - String _elem1222; - for (int _i1223 = 0; _i1223 < _list1221.size; ++_i1223) + org.apache.thrift.protocol.TList _list1229 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1229.size); + String _elem1230; + for (int _i1231 = 0; _i1231 < _list1229.size; ++_i1231) { - _elem1222 = iprot.readString(); - struct.success.add(_elem1222); + _elem1230 = iprot.readString(); + struct.success.add(_elem1230); } } struct.setSuccessIsSet(true); @@ -52569,16 +52710,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1224 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1224.size); - String _key1225; - Type _val1226; - for (int _i1227 = 0; _i1227 < _map1224.size; ++_i1227) + org.apache.thrift.protocol.TMap _map1232 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1232.size); + String _key1233; + Type _val1234; + for (int _i1235 = 0; _i1235 < _map1232.size; ++_i1235) { - _key1225 = iprot.readString(); - _val1226 = new Type(); - _val1226.read(iprot); - struct.success.put(_key1225, _val1226); + _key1233 = iprot.readString(); + _val1234 = new Type(); + _val1234.read(iprot); + struct.success.put(_key1233, _val1234); } iprot.readMapEnd(); } @@ -52613,10 +52754,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter1228 : struct.success.entrySet()) + for (Map.Entry _iter1236 : struct.success.entrySet()) { - oprot.writeString(_iter1228.getKey()); - _iter1228.getValue().write(oprot); + oprot.writeString(_iter1236.getKey()); + _iter1236.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -52655,10 +52796,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1229 : struct.success.entrySet()) + for (Map.Entry _iter1237 : struct.success.entrySet()) { - oprot.writeString(_iter1229.getKey()); - _iter1229.getValue().write(oprot); + oprot.writeString(_iter1237.getKey()); + _iter1237.getValue().write(oprot); } } } @@ -52673,16 +52814,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1230 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map1230.size); - String _key1231; - Type _val1232; - for (int _i1233 = 0; _i1233 < _map1230.size; ++_i1233) + org.apache.thrift.protocol.TMap _map1238 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map1238.size); + String _key1239; + Type _val1240; + 
for (int _i1241 = 0; _i1241 < _map1238.size; ++_i1241) { - _key1231 = iprot.readString(); - _val1232 = new Type(); - _val1232.read(iprot); - struct.success.put(_key1231, _val1232); + _key1239 = iprot.readString(); + _val1240 = new Type(); + _val1240.read(iprot); + struct.success.put(_key1239, _val1240); } } struct.setSuccessIsSet(true); @@ -53717,14 +53858,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1234 = iprot.readListBegin(); - struct.success = new ArrayList(_list1234.size); - FieldSchema _elem1235; - for (int _i1236 = 0; _i1236 < _list1234.size; ++_i1236) + org.apache.thrift.protocol.TList _list1242 = iprot.readListBegin(); + struct.success = new ArrayList(_list1242.size); + FieldSchema _elem1243; + for (int _i1244 = 0; _i1244 < _list1242.size; ++_i1244) { - _elem1235 = new FieldSchema(); - _elem1235.read(iprot); - struct.success.add(_elem1235); + _elem1243 = new FieldSchema(); + _elem1243.read(iprot); + struct.success.add(_elem1243); } iprot.readListEnd(); } @@ -53777,9 +53918,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1237 : struct.success) + for (FieldSchema _iter1245 : struct.success) { - _iter1237.write(oprot); + _iter1245.write(oprot); } oprot.writeListEnd(); } @@ -53834,9 +53975,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1238 : struct.success) + for (FieldSchema _iter1246 : struct.success) { - _iter1238.write(oprot); + _iter1246.write(oprot); } } } @@ -53857,14 +53998,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1239 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1239.size); - FieldSchema _elem1240; - for (int _i1241 = 0; _i1241 < _list1239.size; ++_i1241) + org.apache.thrift.protocol.TList _list1247 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1247.size); + FieldSchema _elem1248; + for (int _i1249 = 0; _i1249 < _list1247.size; ++_i1249) { - _elem1240 = new FieldSchema(); - _elem1240.read(iprot); - struct.success.add(_elem1240); + _elem1248 = new FieldSchema(); + _elem1248.read(iprot); + struct.success.add(_elem1248); } } struct.setSuccessIsSet(true); @@ -55018,14 +55159,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1242 = iprot.readListBegin(); - struct.success = new ArrayList(_list1242.size); - FieldSchema _elem1243; - for (int _i1244 = 0; _i1244 < _list1242.size; ++_i1244) + org.apache.thrift.protocol.TList _list1250 = iprot.readListBegin(); + struct.success = new ArrayList(_list1250.size); + FieldSchema _elem1251; + for (int _i1252 = 0; _i1252 < _list1250.size; ++_i1252) { - _elem1243 = new FieldSchema(); - _elem1243.read(iprot); - 
struct.success.add(_elem1243); + _elem1251 = new FieldSchema(); + _elem1251.read(iprot); + struct.success.add(_elem1251); } iprot.readListEnd(); } @@ -55078,9 +55219,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1245 : struct.success) + for (FieldSchema _iter1253 : struct.success) { - _iter1245.write(oprot); + _iter1253.write(oprot); } oprot.writeListEnd(); } @@ -55135,9 +55276,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1246 : struct.success) + for (FieldSchema _iter1254 : struct.success) { - _iter1246.write(oprot); + _iter1254.write(oprot); } } } @@ -55158,14 +55299,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1247 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1247.size); - FieldSchema _elem1248; - for (int _i1249 = 0; _i1249 < _list1247.size; ++_i1249) + org.apache.thrift.protocol.TList _list1255 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1255.size); + FieldSchema _elem1256; + for (int _i1257 = 0; _i1257 < _list1255.size; ++_i1257) { - _elem1248 = new FieldSchema(); - _elem1248.read(iprot); - struct.success.add(_elem1248); + _elem1256 = new FieldSchema(); + _elem1256.read(iprot); + struct.success.add(_elem1256); } } struct.setSuccessIsSet(true); @@ -56210,14 +56351,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1250 = iprot.readListBegin(); - struct.success = new ArrayList(_list1250.size); - FieldSchema _elem1251; - for (int _i1252 = 0; _i1252 < _list1250.size; ++_i1252) + org.apache.thrift.protocol.TList _list1258 = iprot.readListBegin(); + struct.success = new ArrayList(_list1258.size); + FieldSchema _elem1259; + for (int _i1260 = 0; _i1260 < _list1258.size; ++_i1260) { - _elem1251 = new FieldSchema(); - _elem1251.read(iprot); - struct.success.add(_elem1251); + _elem1259 = new FieldSchema(); + _elem1259.read(iprot); + struct.success.add(_elem1259); } iprot.readListEnd(); } @@ -56270,9 +56411,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1253 : struct.success) + for (FieldSchema _iter1261 : struct.success) { - _iter1253.write(oprot); + _iter1261.write(oprot); } oprot.writeListEnd(); } @@ -56327,9 +56468,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1254 : struct.success) + for (FieldSchema _iter1262 : struct.success) { - _iter1254.write(oprot); + _iter1262.write(oprot); } } } @@ -56350,14 +56491,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result 
st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1255 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1255.size); - FieldSchema _elem1256; - for (int _i1257 = 0; _i1257 < _list1255.size; ++_i1257) + org.apache.thrift.protocol.TList _list1263 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1263.size); + FieldSchema _elem1264; + for (int _i1265 = 0; _i1265 < _list1263.size; ++_i1265) { - _elem1256 = new FieldSchema(); - _elem1256.read(iprot); - struct.success.add(_elem1256); + _elem1264 = new FieldSchema(); + _elem1264.read(iprot); + struct.success.add(_elem1264); } } struct.setSuccessIsSet(true); @@ -57511,14 +57652,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1258 = iprot.readListBegin(); - struct.success = new ArrayList(_list1258.size); - FieldSchema _elem1259; - for (int _i1260 = 0; _i1260 < _list1258.size; ++_i1260) + org.apache.thrift.protocol.TList _list1266 = iprot.readListBegin(); + struct.success = new ArrayList(_list1266.size); + FieldSchema _elem1267; + for (int _i1268 = 0; _i1268 < _list1266.size; ++_i1268) { - _elem1259 = new FieldSchema(); - _elem1259.read(iprot); - struct.success.add(_elem1259); + _elem1267 = new FieldSchema(); + _elem1267.read(iprot); + struct.success.add(_elem1267); } iprot.readListEnd(); } @@ -57571,9 +57712,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1261 : struct.success) + for (FieldSchema _iter1269 : struct.success) { - _iter1261.write(oprot); + _iter1269.write(oprot); } oprot.writeListEnd(); } @@ -57628,9 +57769,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1262 : struct.success) + for (FieldSchema _iter1270 : struct.success) { - _iter1262.write(oprot); + _iter1270.write(oprot); } } } @@ -57651,14 +57792,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1263 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1263.size); - FieldSchema _elem1264; - for (int _i1265 = 0; _i1265 < _list1263.size; ++_i1265) + org.apache.thrift.protocol.TList _list1271 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1271.size); + FieldSchema _elem1272; + for (int _i1273 = 0; _i1273 < _list1271.size; ++_i1273) { - _elem1264 = new FieldSchema(); - _elem1264.read(iprot); - struct.success.add(_elem1264); + _elem1272 = new FieldSchema(); + _elem1272.read(iprot); + struct.success.add(_elem1272); } } struct.setSuccessIsSet(true); @@ -60787,14 +60928,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 2: // PRIMARY_KEYS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1266 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list1266.size); - SQLPrimaryKey _elem1267; - for (int _i1268 = 0; _i1268 < _list1266.size; ++_i1268) + org.apache.thrift.protocol.TList _list1274 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list1274.size); + SQLPrimaryKey _elem1275; + for (int _i1276 = 0; _i1276 < _list1274.size; ++_i1276) { - _elem1267 = new SQLPrimaryKey(); - _elem1267.read(iprot); - struct.primaryKeys.add(_elem1267); + _elem1275 = new SQLPrimaryKey(); + _elem1275.read(iprot); + struct.primaryKeys.add(_elem1275); } iprot.readListEnd(); } @@ -60806,14 +60947,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 3: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1269 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list1269.size); - SQLForeignKey _elem1270; - for (int _i1271 = 0; _i1271 < _list1269.size; ++_i1271) + org.apache.thrift.protocol.TList _list1277 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list1277.size); + SQLForeignKey _elem1278; + for (int _i1279 = 0; _i1279 < _list1277.size; ++_i1279) { - _elem1270 = new SQLForeignKey(); - _elem1270.read(iprot); - struct.foreignKeys.add(_elem1270); + _elem1278 = new SQLForeignKey(); + _elem1278.read(iprot); + struct.foreignKeys.add(_elem1278); } iprot.readListEnd(); } @@ -60825,14 +60966,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 4: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1272 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list1272.size); - SQLUniqueConstraint _elem1273; - for (int _i1274 = 0; _i1274 < _list1272.size; ++_i1274) + org.apache.thrift.protocol.TList _list1280 = iprot.readListBegin(); + struct.uniqueConstraints = new ArrayList(_list1280.size); + SQLUniqueConstraint _elem1281; + for (int _i1282 = 0; _i1282 < _list1280.size; ++_i1282) { - _elem1273 = new SQLUniqueConstraint(); - _elem1273.read(iprot); - struct.uniqueConstraints.add(_elem1273); + _elem1281 = new SQLUniqueConstraint(); + _elem1281.read(iprot); + struct.uniqueConstraints.add(_elem1281); } iprot.readListEnd(); } @@ -60844,14 +60985,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 5: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1275 = iprot.readListBegin(); - struct.notNullConstraints = new ArrayList(_list1275.size); - SQLNotNullConstraint _elem1276; - for (int _i1277 = 0; _i1277 < _list1275.size; ++_i1277) + org.apache.thrift.protocol.TList _list1283 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list1283.size); + SQLNotNullConstraint _elem1284; + for (int _i1285 = 0; _i1285 < _list1283.size; ++_i1285) { - _elem1276 = new SQLNotNullConstraint(); - _elem1276.read(iprot); - struct.notNullConstraints.add(_elem1276); + _elem1284 = new SQLNotNullConstraint(); + _elem1284.read(iprot); + struct.notNullConstraints.add(_elem1284); } iprot.readListEnd(); } @@ -60863,14 +61004,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 6: // DEFAULT_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList 
_list1278 = iprot.readListBegin(); - struct.defaultConstraints = new ArrayList(_list1278.size); - SQLDefaultConstraint _elem1279; - for (int _i1280 = 0; _i1280 < _list1278.size; ++_i1280) + org.apache.thrift.protocol.TList _list1286 = iprot.readListBegin(); + struct.defaultConstraints = new ArrayList(_list1286.size); + SQLDefaultConstraint _elem1287; + for (int _i1288 = 0; _i1288 < _list1286.size; ++_i1288) { - _elem1279 = new SQLDefaultConstraint(); - _elem1279.read(iprot); - struct.defaultConstraints.add(_elem1279); + _elem1287 = new SQLDefaultConstraint(); + _elem1287.read(iprot); + struct.defaultConstraints.add(_elem1287); } iprot.readListEnd(); } @@ -60882,14 +61023,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 7: // CHECK_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1281 = iprot.readListBegin(); - struct.checkConstraints = new ArrayList(_list1281.size); - SQLCheckConstraint _elem1282; - for (int _i1283 = 0; _i1283 < _list1281.size; ++_i1283) + org.apache.thrift.protocol.TList _list1289 = iprot.readListBegin(); + struct.checkConstraints = new ArrayList(_list1289.size); + SQLCheckConstraint _elem1290; + for (int _i1291 = 0; _i1291 < _list1289.size; ++_i1291) { - _elem1282 = new SQLCheckConstraint(); - _elem1282.read(iprot); - struct.checkConstraints.add(_elem1282); + _elem1290 = new SQLCheckConstraint(); + _elem1290.read(iprot); + struct.checkConstraints.add(_elem1290); } iprot.readListEnd(); } @@ -60920,9 +61061,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter1284 : struct.primaryKeys) + for (SQLPrimaryKey _iter1292 : struct.primaryKeys) { - _iter1284.write(oprot); + _iter1292.write(oprot); } oprot.writeListEnd(); } @@ -60932,9 +61073,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter1285 : struct.foreignKeys) + for (SQLForeignKey _iter1293 : struct.foreignKeys) { - _iter1285.write(oprot); + _iter1293.write(oprot); } oprot.writeListEnd(); } @@ -60944,9 +61085,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter1286 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1294 : struct.uniqueConstraints) { - _iter1286.write(oprot); + _iter1294.write(oprot); } oprot.writeListEnd(); } @@ -60956,9 +61097,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter1287 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1295 : struct.notNullConstraints) { - _iter1287.write(oprot); + _iter1295.write(oprot); } oprot.writeListEnd(); } @@ -60968,9 +61109,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); - for (SQLDefaultConstraint _iter1288 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1296 : struct.defaultConstraints) { - _iter1288.write(oprot); + _iter1296.write(oprot); } oprot.writeListEnd(); } @@ -60980,9 +61121,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size())); - for (SQLCheckConstraint _iter1289 : struct.checkConstraints) + for (SQLCheckConstraint _iter1297 : struct.checkConstraints) { - _iter1289.write(oprot); + _iter1297.write(oprot); } oprot.writeListEnd(); } @@ -61034,54 +61175,54 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter1290 : struct.primaryKeys) + for (SQLPrimaryKey _iter1298 : struct.primaryKeys) { - _iter1290.write(oprot); + _iter1298.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter1291 : struct.foreignKeys) + for (SQLForeignKey _iter1299 : struct.foreignKeys) { - _iter1291.write(oprot); + _iter1299.write(oprot); } } } if (struct.isSetUniqueConstraints()) { { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter1292 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1300 : struct.uniqueConstraints) { - _iter1292.write(oprot); + _iter1300.write(oprot); } } } if (struct.isSetNotNullConstraints()) { { oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter1293 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1301 : struct.notNullConstraints) { - _iter1293.write(oprot); + _iter1301.write(oprot); } } } if (struct.isSetDefaultConstraints()) { { oprot.writeI32(struct.defaultConstraints.size()); - for (SQLDefaultConstraint _iter1294 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1302 : struct.defaultConstraints) { - _iter1294.write(oprot); + _iter1302.write(oprot); } } } if (struct.isSetCheckConstraints()) { { oprot.writeI32(struct.checkConstraints.size()); - for (SQLCheckConstraint _iter1295 : struct.checkConstraints) + for (SQLCheckConstraint _iter1303 : struct.checkConstraints) { - _iter1295.write(oprot); + _iter1303.write(oprot); } } } @@ -61098,84 +61239,84 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1296 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list1296.size); - SQLPrimaryKey _elem1297; - for (int _i1298 = 0; _i1298 < _list1296.size; ++_i1298) + org.apache.thrift.protocol.TList _list1304 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list1304.size); + SQLPrimaryKey _elem1305; + for (int _i1306 = 0; _i1306 < _list1304.size; ++_i1306) { - _elem1297 = new SQLPrimaryKey(); - _elem1297.read(iprot); - struct.primaryKeys.add(_elem1297); + _elem1305 = new SQLPrimaryKey(); + _elem1305.read(iprot); + 
struct.primaryKeys.add(_elem1305); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1299 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list1299.size); - SQLForeignKey _elem1300; - for (int _i1301 = 0; _i1301 < _list1299.size; ++_i1301) + org.apache.thrift.protocol.TList _list1307 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list1307.size); + SQLForeignKey _elem1308; + for (int _i1309 = 0; _i1309 < _list1307.size; ++_i1309) { - _elem1300 = new SQLForeignKey(); - _elem1300.read(iprot); - struct.foreignKeys.add(_elem1300); + _elem1308 = new SQLForeignKey(); + _elem1308.read(iprot); + struct.foreignKeys.add(_elem1308); } } struct.setForeignKeysIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list1302 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list1302.size); - SQLUniqueConstraint _elem1303; - for (int _i1304 = 0; _i1304 < _list1302.size; ++_i1304) + org.apache.thrift.protocol.TList _list1310 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list1310.size); + SQLUniqueConstraint _elem1311; + for (int _i1312 = 0; _i1312 < _list1310.size; ++_i1312) { - _elem1303 = new SQLUniqueConstraint(); - _elem1303.read(iprot); - struct.uniqueConstraints.add(_elem1303); + _elem1311 = new SQLUniqueConstraint(); + _elem1311.read(iprot); + struct.uniqueConstraints.add(_elem1311); } } struct.setUniqueConstraintsIsSet(true); } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1305 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list1305.size); - SQLNotNullConstraint _elem1306; - for (int _i1307 = 0; _i1307 < _list1305.size; ++_i1307) + org.apache.thrift.protocol.TList _list1313 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list1313.size); + SQLNotNullConstraint _elem1314; + for (int _i1315 = 0; _i1315 < _list1313.size; ++_i1315) { - _elem1306 = new SQLNotNullConstraint(); - _elem1306.read(iprot); - struct.notNullConstraints.add(_elem1306); + _elem1314 = new SQLNotNullConstraint(); + _elem1314.read(iprot); + struct.notNullConstraints.add(_elem1314); } } struct.setNotNullConstraintsIsSet(true); } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1308 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraints = new ArrayList(_list1308.size); - SQLDefaultConstraint _elem1309; - for (int _i1310 = 0; _i1310 < _list1308.size; ++_i1310) + org.apache.thrift.protocol.TList _list1316 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list1316.size); + SQLDefaultConstraint _elem1317; + for (int _i1318 = 0; _i1318 < _list1316.size; ++_i1318) { - _elem1309 = new SQLDefaultConstraint(); - _elem1309.read(iprot); - struct.defaultConstraints.add(_elem1309); + _elem1317 = new SQLDefaultConstraint(); + _elem1317.read(iprot); + struct.defaultConstraints.add(_elem1317); } } struct.setDefaultConstraintsIsSet(true); } 
if (incoming.get(6)) { { - org.apache.thrift.protocol.TList _list1311 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.checkConstraints = new ArrayList(_list1311.size); - SQLCheckConstraint _elem1312; - for (int _i1313 = 0; _i1313 < _list1311.size; ++_i1313) + org.apache.thrift.protocol.TList _list1319 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraints = new ArrayList(_list1319.size); + SQLCheckConstraint _elem1320; + for (int _i1321 = 0; _i1321 < _list1319.size; ++_i1321) { - _elem1312 = new SQLCheckConstraint(); - _elem1312.read(iprot); - struct.checkConstraints.add(_elem1312); + _elem1320 = new SQLCheckConstraint(); + _elem1320.read(iprot); + struct.checkConstraints.add(_elem1320); } } struct.setCheckConstraintsIsSet(true); @@ -71366,13 +71507,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args case 3: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1314 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list1314.size); - String _elem1315; - for (int _i1316 = 0; _i1316 < _list1314.size; ++_i1316) + org.apache.thrift.protocol.TList _list1322 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list1322.size); + String _elem1323; + for (int _i1324 = 0; _i1324 < _list1322.size; ++_i1324) { - _elem1315 = iprot.readString(); - struct.partNames.add(_elem1315); + _elem1323 = iprot.readString(); + struct.partNames.add(_elem1323); } iprot.readListEnd(); } @@ -71408,9 +71549,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_arg oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter1317 : struct.partNames) + for (String _iter1325 : struct.partNames) { - oprot.writeString(_iter1317); + oprot.writeString(_iter1325); } oprot.writeListEnd(); } @@ -71453,9 +71594,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); - for (String _iter1318 : struct.partNames) + for (String _iter1326 : struct.partNames) { - oprot.writeString(_iter1318); + oprot.writeString(_iter1326); } } } @@ -71475,13 +71616,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1319 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list1319.size); - String _elem1320; - for (int _i1321 = 0; _i1321 < _list1319.size; ++_i1321) + org.apache.thrift.protocol.TList _list1327 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list1327.size); + String _elem1328; + for (int _i1329 = 0; _i1329 < _list1327.size; ++_i1329) { - _elem1320 = iprot.readString(); - struct.partNames.add(_elem1320); + _elem1328 = iprot.readString(); + struct.partNames.add(_elem1328); } } struct.setPartNamesIsSet(true); @@ -73538,13 +73679,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1322 = iprot.readListBegin(); - 
struct.success = new ArrayList(_list1322.size); - String _elem1323; - for (int _i1324 = 0; _i1324 < _list1322.size; ++_i1324) + org.apache.thrift.protocol.TList _list1330 = iprot.readListBegin(); + struct.success = new ArrayList(_list1330.size); + String _elem1331; + for (int _i1332 = 0; _i1332 < _list1330.size; ++_i1332) { - _elem1323 = iprot.readString(); - struct.success.add(_elem1323); + _elem1331 = iprot.readString(); + struct.success.add(_elem1331); } iprot.readListEnd(); } @@ -73579,9 +73720,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1325 : struct.success) + for (String _iter1333 : struct.success) { - oprot.writeString(_iter1325); + oprot.writeString(_iter1333); } oprot.writeListEnd(); } @@ -73620,9 +73761,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1326 : struct.success) + for (String _iter1334 : struct.success) { - oprot.writeString(_iter1326); + oprot.writeString(_iter1334); } } } @@ -73637,13 +73778,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1327 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1327.size); - String _elem1328; - for (int _i1329 = 0; _i1329 < _list1327.size; ++_i1329) + org.apache.thrift.protocol.TList _list1335 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1335.size); + String _elem1336; + for (int _i1337 = 0; _i1337 < _list1335.size; ++_i1337) { - _elem1328 = iprot.readString(); - struct.success.add(_elem1328); + _elem1336 = iprot.readString(); + struct.success.add(_elem1336); } } struct.setSuccessIsSet(true); @@ -74617,13 +74758,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1330 = iprot.readListBegin(); - struct.success = new ArrayList(_list1330.size); - String _elem1331; - for (int _i1332 = 0; _i1332 < _list1330.size; ++_i1332) + org.apache.thrift.protocol.TList _list1338 = iprot.readListBegin(); + struct.success = new ArrayList(_list1338.size); + String _elem1339; + for (int _i1340 = 0; _i1340 < _list1338.size; ++_i1340) { - _elem1331 = iprot.readString(); - struct.success.add(_elem1331); + _elem1339 = iprot.readString(); + struct.success.add(_elem1339); } iprot.readListEnd(); } @@ -74658,9 +74799,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1333 : struct.success) + for (String _iter1341 : struct.success) { - oprot.writeString(_iter1333); + oprot.writeString(_iter1341); } oprot.writeListEnd(); } @@ -74699,9 +74840,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1334 : 
struct.success) + for (String _iter1342 : struct.success) { - oprot.writeString(_iter1334); + oprot.writeString(_iter1342); } } } @@ -74716,13 +74857,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_r BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1335 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1335.size); - String _elem1336; - for (int _i1337 = 0; _i1337 < _list1335.size; ++_i1337) + org.apache.thrift.protocol.TList _list1343 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1343.size); + String _elem1344; + for (int _i1345 = 0; _i1345 < _list1343.size; ++_i1345) { - _elem1336 = iprot.readString(); - struct.success.add(_elem1336); + _elem1344 = iprot.readString(); + struct.success.add(_elem1344); } } struct.setSuccessIsSet(true); @@ -75379,14 +75520,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_materialize case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1338 = iprot.readListBegin(); - struct.success = new ArrayList(_list1338.size); - Table _elem1339; - for (int _i1340 = 0; _i1340 < _list1338.size; ++_i1340) + org.apache.thrift.protocol.TList _list1346 = iprot.readListBegin(); + struct.success = new ArrayList
<Table>(_list1346.size);
+          Table _elem1347;
+          for (int _i1348 = 0; _i1348 < _list1346.size; ++_i1348)
           {
-            _elem1339 = new Table();
-            _elem1339.read(iprot);
-            struct.success.add(_elem1339);
+            _elem1347 = new Table();
+            _elem1347.read(iprot);
+            struct.success.add(_elem1347);
           }
           iprot.readListEnd();
         }
@@ -75421,9 +75562,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_materializ
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-          for (Table _iter1341 : struct.success)
+          for (Table _iter1349 : struct.success)
          {
-            _iter1341.write(oprot);
+            _iter1349.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -75462,9 +75603,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_materialize
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-          for (Table _iter1342 : struct.success)
+          for (Table _iter1350 : struct.success)
          {
-            _iter1342.write(oprot);
+            _iter1350.write(oprot);
          }
        }
      }
@@ -75479,14 +75620,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_materialized
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-          org.apache.thrift.protocol.TList _list1343 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.success = new ArrayList<Table>(_list1343.size);
-          Table _elem1344;
-          for (int _i1345 = 0; _i1345 < _list1343.size; ++_i1345)
+          org.apache.thrift.protocol.TList _list1351 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.success = new ArrayList<Table>(_list1351.size);
+          Table _elem1352;
+          for (int _i1353 = 0; _i1353 < _list1351.size; ++_i1353)
           {
-            _elem1344 = new Table();
-            _elem1344.read(iprot);
-            struct.success.add(_elem1344);
+            _elem1352 = new Table();
+            _elem1352.read(iprot);
+            struct.success.add(_elem1352);
           }
         }
         struct.setSuccessIsSet(true);
@@ -76252,13 +76393,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi
          case 0: // SUCCESS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list1346 = iprot.readListBegin();
-                struct.success = new ArrayList<String>(_list1346.size);
-                String _elem1347;
-                for (int _i1348 = 0; _i1348 < _list1346.size; ++_i1348)
+                org.apache.thrift.protocol.TList _list1354 = iprot.readListBegin();
+                struct.success = new ArrayList<String>(_list1354.size);
+                String _elem1355;
+                for (int _i1356 = 0; _i1356 < _list1354.size; ++_i1356)
                 {
-                  _elem1347 = iprot.readString();
-                  struct.success.add(_elem1347);
+                  _elem1355 = iprot.readString();
+                  struct.success.add(_elem1355);
                 }
                 iprot.readListEnd();
              }
@@ -76293,9 +76434,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-          for (String _iter1349 : struct.success)
+          for (String _iter1357 : struct.success)
          {
-            oprot.writeString(_iter1349);
+            oprot.writeString(_iter1357);
          }
          oprot.writeListEnd();
        }
@@ -76334,9 +76475,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_vi
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-          for (String _iter1350 : struct.success)
+          for (String _iter1358 : struct.success)
          {
-            oprot.writeString(_iter1350);
+            oprot.writeString(_iter1358);
          }
        }
      }
@@ -76351,13 +76492,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_vie
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-          org.apache.thrift.protocol.TList _list1351 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.success = new ArrayList<String>(_list1351.size);
-          String _elem1352;
-          for (int _i1353 = 0; _i1353 < _list1351.size; ++_i1353)
+          org.apache.thrift.protocol.TList _list1359 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.success = new ArrayList<String>(_list1359.size);
+          String _elem1360;
+          for (int _i1361 = 0; _i1361 < _list1359.size; ++_i1361)
           {
-            _elem1352 = iprot.readString();
-            struct.success.add(_elem1352);
+            _elem1360 = iprot.readString();
+            struct.success.add(_elem1360);
           }
         }
         struct.setSuccessIsSet(true);
@@ -76862,13 +77003,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args
          case 3: // TBL_TYPES
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list1354 = iprot.readListBegin();
-                struct.tbl_types = new ArrayList<String>(_list1354.size);
-                String _elem1355;
-                for (int _i1356 = 0; _i1356 < _list1354.size; ++_i1356)
+                org.apache.thrift.protocol.TList _list1362 = iprot.readListBegin();
+                struct.tbl_types = new ArrayList<String>(_list1362.size);
+                String _elem1363;
+                for (int _i1364 = 0; _i1364 < _list1362.size; ++_i1364)
                 {
-                  _elem1355 = iprot.readString();
-                  struct.tbl_types.add(_elem1355);
+                  _elem1363 = iprot.readString();
+                  struct.tbl_types.add(_elem1363);
                 }
                 iprot.readListEnd();
              }
@@ -76904,9 +77045,9 @@ public void write(org.apache.thrift.protocol.TProtocol
oprot, get_table_meta_arg oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (String _iter1357 : struct.tbl_types) + for (String _iter1365 : struct.tbl_types) { - oprot.writeString(_iter1357); + oprot.writeString(_iter1365); } oprot.writeListEnd(); } @@ -76949,9 +77090,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args if (struct.isSetTbl_types()) { { oprot.writeI32(struct.tbl_types.size()); - for (String _iter1358 : struct.tbl_types) + for (String _iter1366 : struct.tbl_types) { - oprot.writeString(_iter1358); + oprot.writeString(_iter1366); } } } @@ -76971,13 +77112,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1359 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_types = new ArrayList(_list1359.size); - String _elem1360; - for (int _i1361 = 0; _i1361 < _list1359.size; ++_i1361) + org.apache.thrift.protocol.TList _list1367 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_types = new ArrayList(_list1367.size); + String _elem1368; + for (int _i1369 = 0; _i1369 < _list1367.size; ++_i1369) { - _elem1360 = iprot.readString(); - struct.tbl_types.add(_elem1360); + _elem1368 = iprot.readString(); + struct.tbl_types.add(_elem1368); } } struct.setTbl_typesIsSet(true); @@ -77383,14 +77524,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1362 = iprot.readListBegin(); - struct.success = new ArrayList(_list1362.size); - TableMeta _elem1363; - for (int _i1364 = 0; _i1364 < _list1362.size; ++_i1364) + org.apache.thrift.protocol.TList _list1370 = iprot.readListBegin(); + struct.success = new ArrayList(_list1370.size); + TableMeta _elem1371; + for (int _i1372 = 0; _i1372 < _list1370.size; ++_i1372) { - _elem1363 = new TableMeta(); - _elem1363.read(iprot); - struct.success.add(_elem1363); + _elem1371 = new TableMeta(); + _elem1371.read(iprot); + struct.success.add(_elem1371); } iprot.readListEnd(); } @@ -77425,9 +77566,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter1365 : struct.success) + for (TableMeta _iter1373 : struct.success) { - _iter1365.write(oprot); + _iter1373.write(oprot); } oprot.writeListEnd(); } @@ -77466,9 +77607,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TableMeta _iter1366 : struct.success) + for (TableMeta _iter1374 : struct.success) { - _iter1366.write(oprot); + _iter1374.write(oprot); } } } @@ -77483,14 +77624,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1367 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1367.size); - TableMeta _elem1368; - for (int _i1369 = 0; 
_i1369 < _list1367.size; ++_i1369) + org.apache.thrift.protocol.TList _list1375 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1375.size); + TableMeta _elem1376; + for (int _i1377 = 0; _i1377 < _list1375.size; ++_i1377) { - _elem1368 = new TableMeta(); - _elem1368.read(iprot); - struct.success.add(_elem1368); + _elem1376 = new TableMeta(); + _elem1376.read(iprot); + struct.success.add(_elem1376); } } struct.setSuccessIsSet(true); @@ -78256,13 +78397,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1370 = iprot.readListBegin(); - struct.success = new ArrayList(_list1370.size); - String _elem1371; - for (int _i1372 = 0; _i1372 < _list1370.size; ++_i1372) + org.apache.thrift.protocol.TList _list1378 = iprot.readListBegin(); + struct.success = new ArrayList(_list1378.size); + String _elem1379; + for (int _i1380 = 0; _i1380 < _list1378.size; ++_i1380) { - _elem1371 = iprot.readString(); - struct.success.add(_elem1371); + _elem1379 = iprot.readString(); + struct.success.add(_elem1379); } iprot.readListEnd(); } @@ -78297,9 +78438,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1373 : struct.success) + for (String _iter1381 : struct.success) { - oprot.writeString(_iter1373); + oprot.writeString(_iter1381); } oprot.writeListEnd(); } @@ -78338,9 +78479,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1374 : struct.success) + for (String _iter1382 : struct.success) { - oprot.writeString(_iter1374); + oprot.writeString(_iter1382); } } } @@ -78355,13 +78496,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1375 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1375.size); - String _elem1376; - for (int _i1377 = 0; _i1377 < _list1375.size; ++_i1377) + org.apache.thrift.protocol.TList _list1383 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1383.size); + String _elem1384; + for (int _i1385 = 0; _i1385 < _list1383.size; ++_i1385) { - _elem1376 = iprot.readString(); - struct.success.add(_elem1376); + _elem1384 = iprot.readString(); + struct.success.add(_elem1384); } } struct.setSuccessIsSet(true); @@ -79814,13 +79955,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1378 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list1378.size); - String _elem1379; - for (int _i1380 = 0; _i1380 < _list1378.size; ++_i1380) + org.apache.thrift.protocol.TList _list1386 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list1386.size); + String _elem1387; + for (int _i1388 = 0; _i1388 < _list1386.size; ++_i1388) { - _elem1379 = 
iprot.readString(); - struct.tbl_names.add(_elem1379); + _elem1387 = iprot.readString(); + struct.tbl_names.add(_elem1387); } iprot.readListEnd(); } @@ -79851,9 +79992,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter1381 : struct.tbl_names) + for (String _iter1389 : struct.tbl_names) { - oprot.writeString(_iter1381); + oprot.writeString(_iter1389); } oprot.writeListEnd(); } @@ -79890,9 +80031,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter1382 : struct.tbl_names) + for (String _iter1390 : struct.tbl_names) { - oprot.writeString(_iter1382); + oprot.writeString(_iter1390); } } } @@ -79908,13 +80049,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1383 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list1383.size); - String _elem1384; - for (int _i1385 = 0; _i1385 < _list1383.size; ++_i1385) + org.apache.thrift.protocol.TList _list1391 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list1391.size); + String _elem1392; + for (int _i1393 = 0; _i1393 < _list1391.size; ++_i1393) { - _elem1384 = iprot.readString(); - struct.tbl_names.add(_elem1384); + _elem1392 = iprot.readString(); + struct.tbl_names.add(_elem1392); } } struct.setTbl_namesIsSet(true); @@ -80239,14 +80380,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1386 = iprot.readListBegin(); - struct.success = new ArrayList
<Table>(_list1386.size);
-          Table _elem1387;
-          for (int _i1388 = 0; _i1388 < _list1386.size; ++_i1388)
+          org.apache.thrift.protocol.TList _list1394 = iprot.readListBegin();
+          struct.success = new ArrayList<Table>(_list1394.size);
+          Table _elem1395;
+          for (int _i1396 = 0; _i1396 < _list1394.size; ++_i1396)
           {
-            _elem1387 = new Table();
-            _elem1387.read(iprot);
-            struct.success.add(_elem1387);
+            _elem1395 = new Table();
+            _elem1395.read(iprot);
+            struct.success.add(_elem1395);
           }
           iprot.readListEnd();
         }
@@ -80272,9 +80413,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-          for (Table _iter1389 : struct.success)
+          for (Table _iter1397 : struct.success)
          {
-            _iter1389.write(oprot);
+            _iter1397.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -80305,9 +80446,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-          for (Table _iter1390 : struct.success)
+          for (Table _iter1398 : struct.success)
          {
-            _iter1390.write(oprot);
+            _iter1398.write(oprot);
          }
        }
      }
@@ -80319,14 +80460,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by
      BitSet incoming = iprot.readBitSet(1);
      if (incoming.get(0)) {
        {
-          org.apache.thrift.protocol.TList _list1391 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.success = new ArrayList<Table>(_list1391.size);
-          Table _elem1392;
-          for (int _i1393 = 0; _i1393 < _list1391.size; ++_i1393)
+          org.apache.thrift.protocol.TList _list1399 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.success = new ArrayList<Table>
(_list1399.size); + Table _elem1400; + for (int _i1401 = 0; _i1401 < _list1399.size; ++_i1401) { - _elem1392 = new Table(); - _elem1392.read(iprot); - struct.success.add(_elem1392); + _elem1400 = new Table(); + _elem1400.read(iprot); + struct.success.add(_elem1400); } } struct.setSuccessIsSet(true); @@ -81095,14 +81236,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_ext_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1394 = iprot.readListBegin(); - struct.success = new ArrayList(_list1394.size); - ExtendedTableInfo _elem1395; - for (int _i1396 = 0; _i1396 < _list1394.size; ++_i1396) + org.apache.thrift.protocol.TList _list1402 = iprot.readListBegin(); + struct.success = new ArrayList(_list1402.size); + ExtendedTableInfo _elem1403; + for (int _i1404 = 0; _i1404 < _list1402.size; ++_i1404) { - _elem1395 = new ExtendedTableInfo(); - _elem1395.read(iprot); - struct.success.add(_elem1395); + _elem1403 = new ExtendedTableInfo(); + _elem1403.read(iprot); + struct.success.add(_elem1403); } iprot.readListEnd(); } @@ -81137,9 +81278,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_ext_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (ExtendedTableInfo _iter1397 : struct.success) + for (ExtendedTableInfo _iter1405 : struct.success) { - _iter1397.write(oprot); + _iter1405.write(oprot); } oprot.writeListEnd(); } @@ -81178,9 +81319,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_ext_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (ExtendedTableInfo _iter1398 : struct.success) + for (ExtendedTableInfo _iter1406 : struct.success) { - _iter1398.write(oprot); + _iter1406.write(oprot); } } } @@ -81195,14 +81336,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_ext_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1399 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1399.size); - ExtendedTableInfo _elem1400; - for (int _i1401 = 0; _i1401 < _list1399.size; ++_i1401) + org.apache.thrift.protocol.TList _list1407 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1407.size); + ExtendedTableInfo _elem1408; + for (int _i1409 = 0; _i1409 < _list1407.size; ++_i1409) { - _elem1400 = new ExtendedTableInfo(); - _elem1400.read(iprot); - struct.success.add(_elem1400); + _elem1408 = new ExtendedTableInfo(); + _elem1408.read(iprot); + struct.success.add(_elem1408); } } struct.setSuccessIsSet(true); @@ -86715,13 +86856,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1402 = iprot.readListBegin(); - struct.success = new ArrayList(_list1402.size); - String _elem1403; - for (int _i1404 = 0; _i1404 < _list1402.size; ++_i1404) + org.apache.thrift.protocol.TList _list1410 = iprot.readListBegin(); + struct.success = new ArrayList(_list1410.size); + String _elem1411; + for (int _i1412 = 0; _i1412 < _list1410.size; ++_i1412) { - _elem1403 = iprot.readString(); - 
struct.success.add(_elem1403); + _elem1411 = iprot.readString(); + struct.success.add(_elem1411); } iprot.readListEnd(); } @@ -86774,9 +86915,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1405 : struct.success) + for (String _iter1413 : struct.success) { - oprot.writeString(_iter1405); + oprot.writeString(_iter1413); } oprot.writeListEnd(); } @@ -86831,9 +86972,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1406 : struct.success) + for (String _iter1414 : struct.success) { - oprot.writeString(_iter1406); + oprot.writeString(_iter1414); } } } @@ -86854,13 +86995,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1407 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1407.size); - String _elem1408; - for (int _i1409 = 0; _i1409 < _list1407.size; ++_i1409) + org.apache.thrift.protocol.TList _list1415 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1415.size); + String _elem1416; + for (int _i1417 = 0; _i1417 < _list1415.size; ++_i1417) { - _elem1408 = iprot.readString(); - struct.success.add(_elem1408); + _elem1416 = iprot.readString(); + struct.success.add(_elem1416); } } struct.setSuccessIsSet(true); @@ -93657,14 +93798,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1410 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1410.size); - Partition _elem1411; - for (int _i1412 = 0; _i1412 < _list1410.size; ++_i1412) + org.apache.thrift.protocol.TList _list1418 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1418.size); + Partition _elem1419; + for (int _i1420 = 0; _i1420 < _list1418.size; ++_i1420) { - _elem1411 = new Partition(); - _elem1411.read(iprot); - struct.new_parts.add(_elem1411); + _elem1419 = new Partition(); + _elem1419.read(iprot); + struct.new_parts.add(_elem1419); } iprot.readListEnd(); } @@ -93690,9 +93831,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1413 : struct.new_parts) + for (Partition _iter1421 : struct.new_parts) { - _iter1413.write(oprot); + _iter1421.write(oprot); } oprot.writeListEnd(); } @@ -93723,9 +93864,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1414 : struct.new_parts) + for (Partition _iter1422 : struct.new_parts) { - _iter1414.write(oprot); + _iter1422.write(oprot); } } } @@ -93737,14 +93878,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - 
org.apache.thrift.protocol.TList _list1415 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1415.size); - Partition _elem1416; - for (int _i1417 = 0; _i1417 < _list1415.size; ++_i1417) + org.apache.thrift.protocol.TList _list1423 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1423.size); + Partition _elem1424; + for (int _i1425 = 0; _i1425 < _list1423.size; ++_i1425) { - _elem1416 = new Partition(); - _elem1416.read(iprot); - struct.new_parts.add(_elem1416); + _elem1424 = new Partition(); + _elem1424.read(iprot); + struct.new_parts.add(_elem1424); } } struct.setNew_partsIsSet(true); @@ -94745,14 +94886,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1418 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1418.size); - PartitionSpec _elem1419; - for (int _i1420 = 0; _i1420 < _list1418.size; ++_i1420) + org.apache.thrift.protocol.TList _list1426 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1426.size); + PartitionSpec _elem1427; + for (int _i1428 = 0; _i1428 < _list1426.size; ++_i1428) { - _elem1419 = new PartitionSpec(); - _elem1419.read(iprot); - struct.new_parts.add(_elem1419); + _elem1427 = new PartitionSpec(); + _elem1427.read(iprot); + struct.new_parts.add(_elem1427); } iprot.readListEnd(); } @@ -94778,9 +94919,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter1421 : struct.new_parts) + for (PartitionSpec _iter1429 : struct.new_parts) { - _iter1421.write(oprot); + _iter1429.write(oprot); } oprot.writeListEnd(); } @@ -94811,9 +94952,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter1422 : struct.new_parts) + for (PartitionSpec _iter1430 : struct.new_parts) { - _iter1422.write(oprot); + _iter1430.write(oprot); } } } @@ -94825,14 +94966,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1423 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1423.size); - PartitionSpec _elem1424; - for (int _i1425 = 0; _i1425 < _list1423.size; ++_i1425) + org.apache.thrift.protocol.TList _list1431 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1431.size); + PartitionSpec _elem1432; + for (int _i1433 = 0; _i1433 < _list1431.size; ++_i1433) { - _elem1424 = new PartitionSpec(); - _elem1424.read(iprot); - struct.new_parts.add(_elem1424); + _elem1432 = new PartitionSpec(); + _elem1432.read(iprot); + struct.new_parts.add(_elem1432); } } struct.setNew_partsIsSet(true); @@ -96008,13 +96149,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list1426 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1426.size); - String _elem1427; - for (int _i1428 = 0; _i1428 < _list1426.size; ++_i1428) + org.apache.thrift.protocol.TList _list1434 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1434.size); + String _elem1435; + for (int _i1436 = 0; _i1436 < _list1434.size; ++_i1436) { - _elem1427 = iprot.readString(); - struct.part_vals.add(_elem1427); + _elem1435 = iprot.readString(); + struct.part_vals.add(_elem1435); } iprot.readListEnd(); } @@ -96050,9 +96191,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1429 : struct.part_vals) + for (String _iter1437 : struct.part_vals) { - oprot.writeString(_iter1429); + oprot.writeString(_iter1437); } oprot.writeListEnd(); } @@ -96095,9 +96236,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1430 : struct.part_vals) + for (String _iter1438 : struct.part_vals) { - oprot.writeString(_iter1430); + oprot.writeString(_iter1438); } } } @@ -96117,13 +96258,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1431 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1431.size); - String _elem1432; - for (int _i1433 = 0; _i1433 < _list1431.size; ++_i1433) + org.apache.thrift.protocol.TList _list1439 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1439.size); + String _elem1440; + for (int _i1441 = 0; _i1441 < _list1439.size; ++_i1441) { - _elem1432 = iprot.readString(); - struct.part_vals.add(_elem1432); + _elem1440 = iprot.readString(); + struct.part_vals.add(_elem1440); } } struct.setPart_valsIsSet(true); @@ -98432,13 +98573,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1434 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1434.size); - String _elem1435; - for (int _i1436 = 0; _i1436 < _list1434.size; ++_i1436) + org.apache.thrift.protocol.TList _list1442 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1442.size); + String _elem1443; + for (int _i1444 = 0; _i1444 < _list1442.size; ++_i1444) { - _elem1435 = iprot.readString(); - struct.part_vals.add(_elem1435); + _elem1443 = iprot.readString(); + struct.part_vals.add(_elem1443); } iprot.readListEnd(); } @@ -98483,9 +98624,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1437 : struct.part_vals) + for (String _iter1445 : struct.part_vals) { - oprot.writeString(_iter1437); + oprot.writeString(_iter1445); } oprot.writeListEnd(); } @@ -98536,9 +98677,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if 
(struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1438 : struct.part_vals) + for (String _iter1446 : struct.part_vals) { - oprot.writeString(_iter1438); + oprot.writeString(_iter1446); } } } @@ -98561,13 +98702,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1439 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1439.size); - String _elem1440; - for (int _i1441 = 0; _i1441 < _list1439.size; ++_i1441) + org.apache.thrift.protocol.TList _list1447 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1447.size); + String _elem1448; + for (int _i1449 = 0; _i1449 < _list1447.size; ++_i1449) { - _elem1440 = iprot.readString(); - struct.part_vals.add(_elem1440); + _elem1448 = iprot.readString(); + struct.part_vals.add(_elem1448); } } struct.setPart_valsIsSet(true); @@ -102437,13 +102578,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1442 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1442.size); - String _elem1443; - for (int _i1444 = 0; _i1444 < _list1442.size; ++_i1444) + org.apache.thrift.protocol.TList _list1450 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1450.size); + String _elem1451; + for (int _i1452 = 0; _i1452 < _list1450.size; ++_i1452) { - _elem1443 = iprot.readString(); - struct.part_vals.add(_elem1443); + _elem1451 = iprot.readString(); + struct.part_vals.add(_elem1451); } iprot.readListEnd(); } @@ -102487,9 +102628,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1445 : struct.part_vals) + for (String _iter1453 : struct.part_vals) { - oprot.writeString(_iter1445); + oprot.writeString(_iter1453); } oprot.writeListEnd(); } @@ -102538,9 +102679,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1446 : struct.part_vals) + for (String _iter1454 : struct.part_vals) { - oprot.writeString(_iter1446); + oprot.writeString(_iter1454); } } } @@ -102563,13 +102704,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1447 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1447.size); - String _elem1448; - for (int _i1449 = 0; _i1449 < _list1447.size; ++_i1449) + org.apache.thrift.protocol.TList _list1455 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1455.size); + String _elem1456; + for (int _i1457 = 0; _i1457 < _list1455.size; ++_i1457) { - _elem1448 = iprot.readString(); - struct.part_vals.add(_elem1448); + _elem1456 = iprot.readString(); + struct.part_vals.add(_elem1456); } } struct.setPart_valsIsSet(true); @@ -103808,13 +103949,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1450 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1450.size); - String _elem1451; - for (int _i1452 = 0; _i1452 < _list1450.size; ++_i1452) + org.apache.thrift.protocol.TList _list1458 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1458.size); + String _elem1459; + for (int _i1460 = 0; _i1460 < _list1458.size; ++_i1460) { - _elem1451 = iprot.readString(); - struct.part_vals.add(_elem1451); + _elem1459 = iprot.readString(); + struct.part_vals.add(_elem1459); } iprot.readListEnd(); } @@ -103867,9 +104008,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1453 : struct.part_vals) + for (String _iter1461 : struct.part_vals) { - oprot.writeString(_iter1453); + oprot.writeString(_iter1461); } oprot.writeListEnd(); } @@ -103926,9 +104067,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1454 : struct.part_vals) + for (String _iter1462 : struct.part_vals) { - oprot.writeString(_iter1454); + oprot.writeString(_iter1462); } } } @@ -103954,13 +104095,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1455 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1455.size); - String _elem1456; - for (int _i1457 = 0; _i1457 < _list1455.size; ++_i1457) + org.apache.thrift.protocol.TList _list1463 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1463.size); + String _elem1464; + for (int _i1465 = 0; _i1465 < _list1463.size; ++_i1465) { - _elem1456 = iprot.readString(); - struct.part_vals.add(_elem1456); + _elem1464 = iprot.readString(); + struct.part_vals.add(_elem1464); } } struct.setPart_valsIsSet(true); @@ -108562,13 +108703,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1458 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1458.size); - String _elem1459; - for (int _i1460 = 0; _i1460 < _list1458.size; ++_i1460) + org.apache.thrift.protocol.TList _list1466 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1466.size); + String _elem1467; + for (int _i1468 = 0; _i1468 < _list1466.size; ++_i1468) { - _elem1459 = iprot.readString(); - struct.part_vals.add(_elem1459); + _elem1467 = iprot.readString(); + struct.part_vals.add(_elem1467); } iprot.readListEnd(); } @@ -108604,9 +108745,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1461 : struct.part_vals) + for (String _iter1469 : struct.part_vals) { - oprot.writeString(_iter1461); + 
@@ -108562,13 +108703,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args
 case 3: // PART_VALS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1458 = iprot.readListBegin();
-      struct.part_vals = new ArrayList<String>(_list1458.size);
-      String _elem1459;
-      for (int _i1460 = 0; _i1460 < _list1458.size; ++_i1460)
+      org.apache.thrift.protocol.TList _list1466 = iprot.readListBegin();
+      struct.part_vals = new ArrayList<String>(_list1466.size);
+      String _elem1467;
+      for (int _i1468 = 0; _i1468 < _list1466.size; ++_i1468)
       {
-        _elem1459 = iprot.readString();
-        struct.part_vals.add(_elem1459);
+        _elem1467 = iprot.readString();
+        struct.part_vals.add(_elem1467);
       }
       iprot.readListEnd();
     }
@@ -108604,9 +108745,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args
 oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-  for (String _iter1461 : struct.part_vals)
+  for (String _iter1469 : struct.part_vals)
   {
-    oprot.writeString(_iter1461);
+    oprot.writeString(_iter1469);
   }
   oprot.writeListEnd();
 }
@@ -108649,9 +108790,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args
 if (struct.isSetPart_vals()) {
   {
     oprot.writeI32(struct.part_vals.size());
-    for (String _iter1462 : struct.part_vals)
+    for (String _iter1470 : struct.part_vals)
    {
-      oprot.writeString(_iter1462);
+      oprot.writeString(_iter1470);
    }
   }
 }
@@ -108671,13 +108812,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s
 }
 if (incoming.get(2)) {
   {
-    org.apache.thrift.protocol.TList _list1463 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.part_vals = new ArrayList<String>(_list1463.size);
-    String _elem1464;
-    for (int _i1465 = 0; _i1465 < _list1463.size; ++_i1465)
+    org.apache.thrift.protocol.TList _list1471 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.part_vals = new ArrayList<String>(_list1471.size);
+    String _elem1472;
+    for (int _i1473 = 0; _i1473 < _list1471.size; ++_i1473)
     {
-      _elem1464 = iprot.readString();
-      struct.part_vals.add(_elem1464);
+      _elem1472 = iprot.readString();
+      struct.part_vals.add(_elem1472);
     }
   }
   struct.setPart_valsIsSet(true);
@@ -109895,15 +110036,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_
 case 1: // PARTITION_SPECS
   if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
     {
-      org.apache.thrift.protocol.TMap _map1466 = iprot.readMapBegin();
-      struct.partitionSpecs = new HashMap<String,String>(2*_map1466.size);
-      String _key1467;
-      String _val1468;
-      for (int _i1469 = 0; _i1469 < _map1466.size; ++_i1469)
+      org.apache.thrift.protocol.TMap _map1474 = iprot.readMapBegin();
+      struct.partitionSpecs = new HashMap<String,String>(2*_map1474.size);
+      String _key1475;
+      String _val1476;
+      for (int _i1477 = 0; _i1477 < _map1474.size; ++_i1477)
       {
-        _key1467 = iprot.readString();
-        _val1468 = iprot.readString();
-        struct.partitionSpecs.put(_key1467, _val1468);
+        _key1475 = iprot.readString();
+        _val1476 = iprot.readString();
+        struct.partitionSpecs.put(_key1475, _val1476);
       }
       iprot.readMapEnd();
     }
@@ -109961,10 +110102,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition
 oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC);
 {
   oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size()));
-  for (Map.Entry<String, String> _iter1470 : struct.partitionSpecs.entrySet())
+  for (Map.Entry<String, String> _iter1478 : struct.partitionSpecs.entrySet())
   {
-    oprot.writeString(_iter1470.getKey());
-    oprot.writeString(_iter1470.getValue());
+    oprot.writeString(_iter1478.getKey());
+    oprot.writeString(_iter1478.getValue());
   }
   oprot.writeMapEnd();
 }
@@ -110027,10 +110168,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_
 if (struct.isSetPartitionSpecs()) {
   {
     oprot.writeI32(struct.partitionSpecs.size());
-    for (Map.Entry<String, String> _iter1471 : struct.partitionSpecs.entrySet())
+    for (Map.Entry<String, String> _iter1479 : struct.partitionSpecs.entrySet())
     {
-      oprot.writeString(_iter1471.getKey());
-      oprot.writeString(_iter1471.getValue());
+      oprot.writeString(_iter1479.getKey());
+      oprot.writeString(_iter1479.getValue());
     }
   }
 }
@@ -110054,15 +110195,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a
 BitSet incoming = iprot.readBitSet(5);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TMap _map1472 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.partitionSpecs = new HashMap<String,String>(2*_map1472.size);
-    String _key1473;
-    String _val1474;
-    for (int _i1475 = 0; _i1475 < _map1472.size; ++_i1475)
+    org.apache.thrift.protocol.TMap _map1480 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.partitionSpecs = new HashMap<String,String>(2*_map1480.size);
+    String _key1481;
+    String _val1482;
+    for (int _i1483 = 0; _i1483 < _map1480.size; ++_i1483)
    {
-      _key1473 = iprot.readString();
-      _val1474 = iprot.readString();
-      struct.partitionSpecs.put(_key1473, _val1474);
+      _key1481 = iprot.readString();
+      _val1482 = iprot.readString();
+      struct.partitionSpecs.put(_key1481, _val1482);
    }
   }
   struct.setPartitionSpecsIsSet(true);
@@ -111508,15 +111649,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions
 case 1: // PARTITION_SPECS
   if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
     {
-      org.apache.thrift.protocol.TMap _map1476 = iprot.readMapBegin();
-      struct.partitionSpecs = new HashMap<String,String>(2*_map1476.size);
-      String _key1477;
-      String _val1478;
-      for (int _i1479 = 0; _i1479 < _map1476.size; ++_i1479)
+      org.apache.thrift.protocol.TMap _map1484 = iprot.readMapBegin();
+      struct.partitionSpecs = new HashMap<String,String>(2*_map1484.size);
+      String _key1485;
+      String _val1486;
+      for (int _i1487 = 0; _i1487 < _map1484.size; ++_i1487)
       {
-        _key1477 = iprot.readString();
-        _val1478 = iprot.readString();
-        struct.partitionSpecs.put(_key1477, _val1478);
+        _key1485 = iprot.readString();
+        _val1486 = iprot.readString();
+        struct.partitionSpecs.put(_key1485, _val1486);
       }
       iprot.readMapEnd();
     }
@@ -111574,10 +111715,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition
 oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC);
 {
   oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size()));
-  for (Map.Entry<String, String> _iter1480 : struct.partitionSpecs.entrySet())
+  for (Map.Entry<String, String> _iter1488 : struct.partitionSpecs.entrySet())
   {
-    oprot.writeString(_iter1480.getKey());
-    oprot.writeString(_iter1480.getValue());
+    oprot.writeString(_iter1488.getKey());
+    oprot.writeString(_iter1488.getValue());
   }
   oprot.writeMapEnd();
 }
@@ -111640,10 +111781,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions
 if (struct.isSetPartitionSpecs()) {
   {
     oprot.writeI32(struct.partitionSpecs.size());
-    for (Map.Entry<String, String> _iter1481 : struct.partitionSpecs.entrySet())
+    for (Map.Entry<String, String> _iter1489 : struct.partitionSpecs.entrySet())
    {
-      oprot.writeString(_iter1481.getKey());
-      oprot.writeString(_iter1481.getValue());
+      oprot.writeString(_iter1489.getKey());
+      oprot.writeString(_iter1489.getValue());
    }
   }
 }
@@ -111667,15 +111808,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_
 BitSet incoming = iprot.readBitSet(5);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TMap _map1482 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.partitionSpecs = new HashMap<String,String>(2*_map1482.size);
-    String _key1483;
-    String _val1484;
-    for (int _i1485 = 0; _i1485 < _map1482.size; ++_i1485)
+    org.apache.thrift.protocol.TMap _map1490 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.partitionSpecs = new HashMap<String,String>(2*_map1490.size);
+    String _key1491;
+    String _val1492;
+    for (int _i1493 = 0; _i1493 < _map1490.size; ++_i1493)
    {
-      _key1483 = iprot.readString();
-      _val1484 = iprot.readString();
-      struct.partitionSpecs.put(_key1483, _val1484);
+      _key1491 = iprot.readString();
+      _val1492 = iprot.readString();
+      struct.partitionSpecs.put(_key1491, _val1492);
    }
   }
   struct.setPartitionSpecsIsSet(true);
@@ -112340,14 +112481,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1486 = iprot.readListBegin();
-      struct.success = new ArrayList<Partition>(_list1486.size);
-      Partition _elem1487;
-      for (int _i1488 = 0; _i1488 < _list1486.size; ++_i1488)
+      org.apache.thrift.protocol.TList _list1494 = iprot.readListBegin();
+      struct.success = new ArrayList<Partition>(_list1494.size);
+      Partition _elem1495;
+      for (int _i1496 = 0; _i1496 < _list1494.size; ++_i1496)
       {
-        _elem1487 = new Partition();
-        _elem1487.read(iprot);
-        struct.success.add(_elem1487);
+        _elem1495 = new Partition();
+        _elem1495.read(iprot);
+        struct.success.add(_elem1495);
       }
       iprot.readListEnd();
     }
@@ -112409,9 +112550,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition
 oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-  for (Partition _iter1489 : struct.success)
+  for (Partition _iter1497 : struct.success)
   {
-    _iter1489.write(oprot);
+    _iter1497.write(oprot);
   }
   oprot.writeListEnd();
 }
@@ -112474,9 +112615,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions
 if (struct.isSetSuccess()) {
   {
     oprot.writeI32(struct.success.size());
-    for (Partition _iter1490 : struct.success)
+    for (Partition _iter1498 : struct.success)
    {
-      _iter1490.write(oprot);
+      _iter1498.write(oprot);
    }
   }
 }
@@ -112500,14 +112641,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_
 BitSet incoming = iprot.readBitSet(5);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TList _list1491 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-    struct.success = new ArrayList<Partition>(_list1491.size);
-    Partition _elem1492;
-    for (int _i1493 = 0; _i1493 < _list1491.size; ++_i1493)
+    org.apache.thrift.protocol.TList _list1499 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+    struct.success = new ArrayList<Partition>(_list1499.size);
+    Partition _elem1500;
+    for (int _i1501 = 0; _i1501 < _list1499.size; ++_i1501)
    {
-      _elem1492 = new Partition();
-      _elem1492.read(iprot);
-      struct.success.add(_elem1492);
+      _elem1500 = new Partition();
+      _elem1500.read(iprot);
+      struct.success.add(_elem1500);
    }
   }
   struct.setSuccessIsSet(true);
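The exchange_partition/exchange_partitions hunks above renumber the same pattern for a map<string,string> field (partitionSpecs). Note that the generated reader presizes the HashMap at 2*size, keeping the load factor below the default resize threshold. A minimal sketch of that pattern, again assuming only libthrift (names below are hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TMap;
    import org.apache.thrift.protocol.TProtocol;
    import org.apache.thrift.protocol.TType;

    // Mirrors the generated map<string,string> code: writeMapBegin frames
    // the entries, and each entry is a key string followed by a value string.
    public final class StringMapScheme {
      private StringMapScheme() {}

      static void writeStringMap(TProtocol oprot, Map<String, String> m) throws TException {
        oprot.writeMapBegin(new TMap(TType.STRING, TType.STRING, m.size()));
        for (Map.Entry<String, String> e : m.entrySet()) {
          oprot.writeString(e.getKey());
          oprot.writeString(e.getValue());
        }
        oprot.writeMapEnd();
      }

      static Map<String, String> readStringMap(TProtocol iprot) throws TException {
        TMap header = iprot.readMapBegin();
        Map<String, String> m = new HashMap<String, String>(2 * header.size);
        for (int i = 0; i < header.size; ++i) {
          String key = iprot.readString();   // key written first
          String val = iprot.readString();
          m.put(key, val);
        }
        iprot.readMapEnd();
        return m;
      }
    }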
@@ -113206,13 +113347,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_
 case 3: // PART_VALS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1494 = iprot.readListBegin();
-      struct.part_vals = new ArrayList<String>(_list1494.size);
-      String _elem1495;
-      for (int _i1496 = 0; _i1496 < _list1494.size; ++_i1496)
+      org.apache.thrift.protocol.TList _list1502 = iprot.readListBegin();
+      struct.part_vals = new ArrayList<String>(_list1502.size);
+      String _elem1503;
+      for (int _i1504 = 0; _i1504 < _list1502.size; ++_i1504)
       {
-        _elem1495 = iprot.readString();
-        struct.part_vals.add(_elem1495);
+        _elem1503 = iprot.readString();
+        struct.part_vals.add(_elem1503);
       }
       iprot.readListEnd();
     }
@@ -113232,13 +113373,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_
 case 5: // GROUP_NAMES
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1497 = iprot.readListBegin();
-      struct.group_names = new ArrayList<String>(_list1497.size);
-      String _elem1498;
-      for (int _i1499 = 0; _i1499 < _list1497.size; ++_i1499)
+      org.apache.thrift.protocol.TList _list1505 = iprot.readListBegin();
+      struct.group_names = new ArrayList<String>(_list1505.size);
+      String _elem1506;
+      for (int _i1507 = 0; _i1507 < _list1505.size; ++_i1507)
       {
-        _elem1498 = iprot.readString();
-        struct.group_names.add(_elem1498);
+        _elem1506 = iprot.readString();
+        struct.group_names.add(_elem1506);
      }
      iprot.readListEnd();
    }
@@ -113274,9 +113415,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with
 oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-  for (String _iter1500 : struct.part_vals)
+  for (String _iter1508 : struct.part_vals)
   {
-    oprot.writeString(_iter1500);
+    oprot.writeString(_iter1508);
   }
   oprot.writeListEnd();
 }
@@ -113291,9 +113432,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with
 oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size()));
-  for (String _iter1501 : struct.group_names)
+  for (String _iter1509 : struct.group_names)
   {
-    oprot.writeString(_iter1501);
+    oprot.writeString(_iter1509);
   }
   oprot.writeListEnd();
 }
@@ -113342,9 +113483,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_
 if (struct.isSetPart_vals()) {
   {
     oprot.writeI32(struct.part_vals.size());
-    for (String _iter1502 : struct.part_vals)
+    for (String _iter1510 : struct.part_vals)
    {
-      oprot.writeString(_iter1502);
+      oprot.writeString(_iter1510);
    }
   }
 }
@@ -113354,9 +113495,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_
 if (struct.isSetGroup_names()) {
   {
     oprot.writeI32(struct.group_names.size());
-    for (String _iter1503 : struct.group_names)
+    for (String _iter1511 : struct.group_names)
    {
-      oprot.writeString(_iter1503);
+      oprot.writeString(_iter1511);
    }
   }
 }
@@ -113376,13 +113517,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a
 }
 if (incoming.get(2)) {
   {
-    org.apache.thrift.protocol.TList _list1504 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.part_vals = new ArrayList<String>(_list1504.size);
-    String _elem1505;
-    for (int _i1506 = 0; _i1506 < _list1504.size; ++_i1506)
+    org.apache.thrift.protocol.TList _list1512 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.part_vals = new ArrayList<String>(_list1512.size);
+    String _elem1513;
+    for (int _i1514 = 0; _i1514 < _list1512.size; ++_i1514)
    {
-      _elem1505 = iprot.readString();
-      struct.part_vals.add(_elem1505);
+      _elem1513 = iprot.readString();
+      struct.part_vals.add(_elem1513);
    }
   }
   struct.setPart_valsIsSet(true);
@@ -113393,13 +113534,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a
 }
 if (incoming.get(4)) {
   {
-    org.apache.thrift.protocol.TList _list1507 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.group_names = new ArrayList<String>(_list1507.size);
-    String _elem1508;
-    for (int _i1509 = 0; _i1509 < _list1507.size; ++_i1509)
+    org.apache.thrift.protocol.TList _list1515 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.group_names = new ArrayList<String>(_list1515.size);
+    String _elem1516;
+    for (int _i1517 = 0; _i1517 < _list1515.size; ++_i1517)
    {
-      _elem1508 = iprot.readString();
-      struct.group_names.add(_elem1508);
+      _elem1516 = iprot.readString();
+      struct.group_names.add(_elem1516);
    }
   }
   struct.setGroup_namesIsSet(true);
@@ -116168,14 +116309,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1510 = iprot.readListBegin();
-      struct.success = new ArrayList<Partition>(_list1510.size);
-      Partition _elem1511;
-      for (int _i1512 = 0; _i1512 < _list1510.size; ++_i1512)
+      org.apache.thrift.protocol.TList _list1518 = iprot.readListBegin();
+      struct.success = new ArrayList<Partition>(_list1518.size);
+      Partition _elem1519;
+      for (int _i1520 = 0; _i1520 < _list1518.size; ++_i1520)
       {
-        _elem1511 = new Partition();
-        _elem1511.read(iprot);
-        struct.success.add(_elem1511);
+        _elem1519 = new Partition();
+        _elem1519.read(iprot);
+        struct.success.add(_elem1519);
       }
       iprot.readListEnd();
     }
@@ -116219,9 +116360,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res
 oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-  for (Partition _iter1513 : struct.success)
+  for (Partition _iter1521 : struct.success)
   {
-    _iter1513.write(oprot);
+    _iter1521.write(oprot);
   }
   oprot.writeListEnd();
 }
@@ -116268,9 +116409,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu
 if (struct.isSetSuccess()) {
   {
     oprot.writeI32(struct.success.size());
-    for (Partition _iter1514 : struct.success)
+    for (Partition _iter1522 : struct.success)
    {
-      _iter1514.write(oprot);
+      _iter1522.write(oprot);
    }
   }
 }
@@ -116288,14 +116429,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul
 BitSet incoming = iprot.readBitSet(3);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TList _list1515 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-    struct.success = new ArrayList<Partition>(_list1515.size);
-    Partition _elem1516;
-    for (int _i1517 = 0; _i1517 < _list1515.size; ++_i1517)
+    org.apache.thrift.protocol.TList _list1523 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+    struct.success = new ArrayList<Partition>(_list1523.size);
+    Partition _elem1524;
+    for (int _i1525 = 0; _i1525 < _list1523.size; ++_i1525)
    {
-      _elem1516 = new Partition();
-      _elem1516.read(iprot);
-      struct.success.add(_elem1516);
+      _elem1524 = new Partition();
+      _elem1524.read(iprot);
+      struct.success.add(_elem1524);
    }
   }
   struct.setSuccessIsSet(true);
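The *_args/*_result tuple-scheme hunks above follow Thrift's compact convention: optional fields are announced up front in a BitSet, and a list is sent as a bare i32 count followed by its elements, with no list-end framing. A minimal sketch of that convention under the same libthrift assumption (the wrapper class and a single optional field are invented for illustration):

    import java.util.ArrayList;
    import java.util.BitSet;
    import java.util.List;

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TList;
    import org.apache.thrift.protocol.TTupleProtocol;
    import org.apache.thrift.protocol.TType;

    // Tuple scheme: a leading BitSet says which optional fields follow.
    public final class TupleSchemeSketch {
      private TupleSchemeSketch() {}

      static void writeOptionalStringList(TTupleProtocol oprot, List<String> vals) throws TException {
        BitSet optionals = new BitSet();
        if (vals != null) {
          optionals.set(0);                  // field 0 is present
        }
        oprot.writeBitSet(optionals, 1);     // one optional field in this sketch
        if (vals != null) {
          oprot.writeI32(vals.size());       // bare count, no writeListBegin
          for (String v : vals) {
            oprot.writeString(v);
          }
        }
      }

      static List<String> readOptionalStringList(TTupleProtocol iprot) throws TException {
        BitSet incoming = iprot.readBitSet(1);
        if (!incoming.get(0)) {
          return null;                       // field was not serialized
        }
        TList header = new TList(TType.STRING, iprot.readI32());
        List<String> vals = new ArrayList<String>(header.size);
        for (int i = 0; i < header.size; ++i) {
          vals.add(iprot.readString());
        }
        return vals;
      }
    }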
@@ -116985,13 +117126,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with
 case 5: // GROUP_NAMES
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1518 = iprot.readListBegin();
-      struct.group_names = new ArrayList<String>(_list1518.size);
-      String _elem1519;
-      for (int _i1520 = 0; _i1520 < _list1518.size; ++_i1520)
+      org.apache.thrift.protocol.TList _list1526 = iprot.readListBegin();
+      struct.group_names = new ArrayList<String>(_list1526.size);
+      String _elem1527;
+      for (int _i1528 = 0; _i1528 < _list1526.size; ++_i1528)
       {
-        _elem1519 = iprot.readString();
-        struct.group_names.add(_elem1519);
+        _elem1527 = iprot.readString();
+        struct.group_names.add(_elem1527);
       }
       iprot.readListEnd();
     }
@@ -117035,9 +117176,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit
 oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size()));
-  for (String _iter1521 : struct.group_names)
+  for (String _iter1529 : struct.group_names)
   {
-    oprot.writeString(_iter1521);
+    oprot.writeString(_iter1529);
   }
   oprot.writeListEnd();
 }
@@ -117092,9 +117233,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with
 if (struct.isSetGroup_names()) {
   {
     oprot.writeI32(struct.group_names.size());
-    for (String _iter1522 : struct.group_names)
+    for (String _iter1530 : struct.group_names)
    {
-      oprot.writeString(_iter1522);
+      oprot.writeString(_iter1530);
    }
   }
 }
@@ -117122,13 +117263,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_
 }
 if (incoming.get(4)) {
   {
-    org.apache.thrift.protocol.TList _list1523 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.group_names = new ArrayList<String>(_list1523.size);
-    String _elem1524;
-    for (int _i1525 = 0; _i1525 < _list1523.size; ++_i1525)
+    org.apache.thrift.protocol.TList _list1531 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.group_names = new ArrayList<String>(_list1531.size);
+    String _elem1532;
+    for (int _i1533 = 0; _i1533 < _list1531.size; ++_i1533)
    {
-      _elem1524 = iprot.readString();
-      struct.group_names.add(_elem1524);
+      _elem1532 = iprot.readString();
+      struct.group_names.add(_elem1532);
    }
   }
   struct.setGroup_namesIsSet(true);
@@ -117615,14 +117756,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1526 = iprot.readListBegin();
-      struct.success = new ArrayList<Partition>(_list1526.size);
-      Partition _elem1527;
-      for (int _i1528 = 0; _i1528 < _list1526.size; ++_i1528)
+      org.apache.thrift.protocol.TList _list1534 = iprot.readListBegin();
+      struct.success = new ArrayList<Partition>(_list1534.size);
+      Partition _elem1535;
+      for (int _i1536 = 0; _i1536 < _list1534.size; ++_i1536)
       {
-        _elem1527 = new Partition();
-        _elem1527.read(iprot);
-        struct.success.add(_elem1527);
+        _elem1535 = new Partition();
+        _elem1535.read(iprot);
+        struct.success.add(_elem1535);
       }
       iprot.readListEnd();
     }
@@ -117666,9 +117807,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit
 oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-  for (Partition _iter1529 : struct.success)
+  for (Partition _iter1537 : struct.success)
   {
-    _iter1529.write(oprot);
+    _iter1537.write(oprot);
   }
   oprot.writeListEnd();
 }
@@ -117715,9 +117856,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with
 if (struct.isSetSuccess()) {
   {
     oprot.writeI32(struct.success.size());
-    for (Partition _iter1530 : struct.success)
+    for (Partition _iter1538 : struct.success)
    {
-      _iter1530.write(oprot);
+      _iter1538.write(oprot);
    }
   }
 }
@@ -117735,14 +117876,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_
 BitSet incoming = iprot.readBitSet(3);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TList _list1531 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-    struct.success = new ArrayList<Partition>(_list1531.size);
-    Partition _elem1532;
-    for (int _i1533 = 0; _i1533 < _list1531.size; ++_i1533)
+    org.apache.thrift.protocol.TList _list1539 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+    struct.success = new ArrayList<Partition>(_list1539.size);
+    Partition _elem1540;
+    for (int _i1541 = 0; _i1541 < _list1539.size; ++_i1541)
    {
-      _elem1532 = new Partition();
-      _elem1532.read(iprot);
-      struct.success.add(_elem1532);
+      _elem1540 = new Partition();
+      _elem1540.read(iprot);
+      struct.success.add(_elem1540);
    }
   }
   struct.setSuccessIsSet(true);
@@ -118805,14 +118946,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1534 = iprot.readListBegin();
-      struct.success = new ArrayList<PartitionSpec>(_list1534.size);
-      PartitionSpec _elem1535;
-      for (int _i1536 = 0; _i1536 < _list1534.size; ++_i1536)
+      org.apache.thrift.protocol.TList _list1542 = iprot.readListBegin();
+      struct.success = new ArrayList<PartitionSpec>(_list1542.size);
+      PartitionSpec _elem1543;
+      for (int _i1544 = 0; _i1544 < _list1542.size; ++_i1544)
       {
-        _elem1535 = new PartitionSpec();
-        _elem1535.read(iprot);
-        struct.success.add(_elem1535);
+        _elem1543 = new PartitionSpec();
+        _elem1543.read(iprot);
+        struct.success.add(_elem1543);
       }
       iprot.readListEnd();
     }
@@ -118856,9 +118997,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp
 oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-  for (PartitionSpec _iter1537 : struct.success)
+  for (PartitionSpec _iter1545 : struct.success)
   {
-    _iter1537.write(oprot);
+    _iter1545.write(oprot);
   }
   oprot.writeListEnd();
 }
@@ -118905,9 +119046,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe
 if (struct.isSetSuccess()) {
   {
     oprot.writeI32(struct.success.size());
-    for (PartitionSpec _iter1538 : struct.success)
+    for (PartitionSpec _iter1546 : struct.success)
    {
-      _iter1538.write(oprot);
+      _iter1546.write(oprot);
    }
   }
 }
@@ -118925,14 +119066,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec
 BitSet incoming = iprot.readBitSet(3);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TList _list1539 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-    struct.success = new ArrayList<PartitionSpec>(_list1539.size);
-    PartitionSpec _elem1540;
-    for (int _i1541 = 0; _i1541 < _list1539.size; ++_i1541)
+    org.apache.thrift.protocol.TList _list1547 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+    struct.success = new ArrayList<PartitionSpec>(_list1547.size);
+    PartitionSpec _elem1548;
+    for (int _i1549 = 0; _i1549 < _list1547.size; ++_i1549)
    {
-      _elem1540 = new PartitionSpec();
-      _elem1540.read(iprot);
-      struct.success.add(_elem1540);
+      _elem1548 = new PartitionSpec();
+      _elem1548.read(iprot);
+      struct.success.add(_elem1548);
    }
   }
   struct.setSuccessIsSet(true);
@@ -119992,13 +120133,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1542 = iprot.readListBegin();
-      struct.success = new ArrayList<String>(_list1542.size);
-      String _elem1543;
-      for (int _i1544 = 0; _i1544 < _list1542.size; ++_i1544)
+      org.apache.thrift.protocol.TList _list1550 = iprot.readListBegin();
+      struct.success = new ArrayList<String>(_list1550.size);
+      String _elem1551;
+      for (int _i1552 = 0; _i1552 < _list1550.size; ++_i1552)
       {
-        _elem1543 = iprot.readString();
-        struct.success.add(_elem1543);
+        _elem1551 = iprot.readString();
+        struct.success.add(_elem1551);
       }
       iprot.readListEnd();
     }
@@ -120042,9 +120183,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name
 oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-  for (String _iter1545 : struct.success)
+  for (String _iter1553 : struct.success)
   {
-    oprot.writeString(_iter1545);
+    oprot.writeString(_iter1553);
   }
   oprot.writeListEnd();
 }
@@ -120091,9 +120232,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names
 if (struct.isSetSuccess()) {
   {
     oprot.writeI32(struct.success.size());
-    for (String _iter1546 : struct.success)
+    for (String _iter1554 : struct.success)
    {
-      oprot.writeString(_iter1546);
+      oprot.writeString(_iter1554);
    }
   }
 }
@@ -120111,13 +120252,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_
 BitSet incoming = iprot.readBitSet(3);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TList _list1547 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.success = new ArrayList<String>(_list1547.size);
-    String _elem1548;
-    for (int _i1549 = 0; _i1549 < _list1547.size; ++_i1549)
+    org.apache.thrift.protocol.TList _list1555 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.success = new ArrayList<String>(_list1555.size);
+    String _elem1556;
+    for (int _i1557 = 0; _i1557 < _list1555.size; ++_i1557)
    {
-      _elem1548 = iprot.readString();
-      struct.success.add(_elem1548);
+      _elem1556 = iprot.readString();
+      struct.success.add(_elem1556);
    }
   }
   struct.setSuccessIsSet(true);
@@ -121648,13 +121789,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a
 case 3: // PART_VALS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1550 = iprot.readListBegin();
-      struct.part_vals = new ArrayList<String>(_list1550.size);
-      String _elem1551;
-      for (int _i1552 = 0; _i1552 < _list1550.size; ++_i1552)
+      org.apache.thrift.protocol.TList _list1558 = iprot.readListBegin();
+      struct.part_vals = new ArrayList<String>(_list1558.size);
+      String _elem1559;
+      for (int _i1560 = 0; _i1560 < _list1558.size; ++_i1560)
       {
-        _elem1551 = iprot.readString();
-        struct.part_vals.add(_elem1551);
+        _elem1559 = iprot.readString();
+        struct.part_vals.add(_elem1559);
       }
       iprot.readListEnd();
     }
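The Partition and PartitionSpec result hunks above all renumber the list<struct> variant of the same pattern: the element type in the list header is TType.STRUCT, each element writes itself, and the reader instantiates a fresh element before delegating to its read() method. A hypothetical generic rendering of that pattern, assuming libthrift and Java 8+ (nothing here appears in the generated file; see the sketch below):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Supplier;

    import org.apache.thrift.TBase;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TList;
    import org.apache.thrift.protocol.TProtocol;
    import org.apache.thrift.protocol.TType;

    // Generic stand-in for the generated list<struct> code: the generated
    // classes inline this with a concrete type such as Partition.
    public final class StructListScheme {
      private StructListScheme() {}

      static <T extends TBase<?, ?>> void writeStructList(TProtocol oprot, List<T> elems) throws TException {
        oprot.writeListBegin(new TList(TType.STRUCT, elems.size()));
        for (T elem : elems) {
          elem.write(oprot);      // each struct serializes itself
        }
        oprot.writeListEnd();
      }

      static <T extends TBase<?, ?>> List<T> readStructList(TProtocol iprot, Supplier<T> factory) throws TException {
        TList header = iprot.readListBegin();
        List<T> elems = new ArrayList<T>(header.size);
        for (int i = 0; i < header.size; ++i) {
          T elem = factory.get(); // plays the role of "new Partition()" above
          elem.read(iprot);
          elems.add(elem);
        }
        iprot.readListEnd();
        return elems;
      }
    }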
@@ -121698,9 +121839,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_
 oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-  for (String _iter1553 : struct.part_vals)
+  for (String _iter1561 : struct.part_vals)
   {
-    oprot.writeString(_iter1553);
+    oprot.writeString(_iter1561);
   }
   oprot.writeListEnd();
 }
@@ -121749,9 +121890,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a
 if (struct.isSetPart_vals()) {
   {
     oprot.writeI32(struct.part_vals.size());
-    for (String _iter1554 : struct.part_vals)
+    for (String _iter1562 : struct.part_vals)
    {
-      oprot.writeString(_iter1554);
+      oprot.writeString(_iter1562);
    }
   }
 }
@@ -121774,13 +121915,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar
 }
 if (incoming.get(2)) {
   {
-    org.apache.thrift.protocol.TList _list1555 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.part_vals = new ArrayList<String>(_list1555.size);
-    String _elem1556;
-    for (int _i1557 = 0; _i1557 < _list1555.size; ++_i1557)
+    org.apache.thrift.protocol.TList _list1563 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.part_vals = new ArrayList<String>(_list1563.size);
+    String _elem1564;
+    for (int _i1565 = 0; _i1565 < _list1563.size; ++_i1565)
    {
-      _elem1556 = iprot.readString();
-      struct.part_vals.add(_elem1556);
+      _elem1564 = iprot.readString();
+      struct.part_vals.add(_elem1564);
    }
   }
   struct.setPart_valsIsSet(true);
@@ -122271,14 +122412,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1558 = iprot.readListBegin();
-      struct.success = new ArrayList<Partition>(_list1558.size);
-      Partition _elem1559;
-      for (int _i1560 = 0; _i1560 < _list1558.size; ++_i1560)
+      org.apache.thrift.protocol.TList _list1566 = iprot.readListBegin();
+      struct.success = new ArrayList<Partition>(_list1566.size);
+      Partition _elem1567;
+      for (int _i1568 = 0; _i1568 < _list1566.size; ++_i1568)
       {
-        _elem1559 = new Partition();
-        _elem1559.read(iprot);
-        struct.success.add(_elem1559);
+        _elem1567 = new Partition();
+        _elem1567.read(iprot);
+        struct.success.add(_elem1567);
       }
       iprot.readListEnd();
     }
@@ -122322,9 +122463,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_
 oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-  for (Partition _iter1561 : struct.success)
+  for (Partition _iter1569 : struct.success)
   {
-    _iter1561.write(oprot);
+    _iter1569.write(oprot);
   }
   oprot.writeListEnd();
 }
@@ -122371,9 +122512,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r
 if (struct.isSetSuccess()) {
   {
     oprot.writeI32(struct.success.size());
-    for (Partition _iter1562 : struct.success)
+    for (Partition _iter1570 : struct.success)
    {
-      _iter1562.write(oprot);
+      _iter1570.write(oprot);
    }
   }
 }
@@ -122391,14 +122532,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re
 BitSet incoming = iprot.readBitSet(3);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TList _list1563 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-    struct.success = new ArrayList<Partition>(_list1563.size);
-    Partition _elem1564;
-    for (int _i1565 = 0; _i1565 < _list1563.size; ++_i1565)
+    org.apache.thrift.protocol.TList _list1571 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+    struct.success = new ArrayList<Partition>(_list1571.size);
+    Partition _elem1572;
+    for (int _i1573 = 0; _i1573 < _list1571.size; ++_i1573)
    {
-      _elem1564 = new Partition();
-      _elem1564.read(iprot);
-      struct.success.add(_elem1564);
+      _elem1572 = new Partition();
+      _elem1572.read(iprot);
+      struct.success.add(_elem1572);
    }
   }
   struct.setSuccessIsSet(true);
@@ -123170,13 +123311,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w
 case 3: // PART_VALS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1566 = iprot.readListBegin();
-      struct.part_vals = new ArrayList<String>(_list1566.size);
-      String _elem1567;
-      for (int _i1568 = 0; _i1568 < _list1566.size; ++_i1568)
+      org.apache.thrift.protocol.TList _list1574 = iprot.readListBegin();
+      struct.part_vals = new ArrayList<String>(_list1574.size);
+      String _elem1575;
+      for (int _i1576 = 0; _i1576 < _list1574.size; ++_i1576)
       {
-        _elem1567 = iprot.readString();
-        struct.part_vals.add(_elem1567);
+        _elem1575 = iprot.readString();
+        struct.part_vals.add(_elem1575);
       }
       iprot.readListEnd();
     }
@@ -123204,13 +123345,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w
 case 6: // GROUP_NAMES
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1569 = iprot.readListBegin();
-      struct.group_names = new ArrayList<String>(_list1569.size);
-      String _elem1570;
-      for (int _i1571 = 0; _i1571 < _list1569.size; ++_i1571)
+      org.apache.thrift.protocol.TList _list1577 = iprot.readListBegin();
+      struct.group_names = new ArrayList<String>(_list1577.size);
+      String _elem1578;
+      for (int _i1579 = 0; _i1579 < _list1577.size; ++_i1579)
       {
-        _elem1570 = iprot.readString();
-        struct.group_names.add(_elem1570);
+        _elem1578 = iprot.readString();
+        struct.group_names.add(_elem1578);
      }
      iprot.readListEnd();
    }
@@ -123246,9 +123387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_
 oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-  for (String _iter1572 : struct.part_vals)
+  for (String _iter1580 : struct.part_vals)
   {
-    oprot.writeString(_iter1572);
+    oprot.writeString(_iter1580);
   }
   oprot.writeListEnd();
 }
@@ -123266,9 +123407,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_
 oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size()));
-  for (String _iter1573 : struct.group_names)
+  for (String _iter1581 : struct.group_names)
   {
-    oprot.writeString(_iter1573);
+    oprot.writeString(_iter1581);
   }
   oprot.writeListEnd();
 }
@@ -123320,9 +123461,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w
 if (struct.isSetPart_vals()) {
   {
     oprot.writeI32(struct.part_vals.size());
-    for (String _iter1574 : struct.part_vals)
+    for (String _iter1582 : struct.part_vals)
    {
-      oprot.writeString(_iter1574);
+      oprot.writeString(_iter1582);
    }
   }
 }
@@ -123335,9 +123476,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w
 if (struct.isSetGroup_names()) {
   {
     oprot.writeI32(struct.group_names.size());
-    for (String _iter1575 : struct.group_names)
+    for (String _iter1583 : struct.group_names)
    {
-      oprot.writeString(_iter1575);
+      oprot.writeString(_iter1583);
    }
   }
 }
@@ -123357,13 +123498,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi
 }
 if (incoming.get(2)) {
   {
-    org.apache.thrift.protocol.TList _list1576 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.part_vals = new ArrayList<String>(_list1576.size);
-    String _elem1577;
-    for (int _i1578 = 0; _i1578 < _list1576.size; ++_i1578)
+    org.apache.thrift.protocol.TList _list1584 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.part_vals = new ArrayList<String>(_list1584.size);
+    String _elem1585;
+    for (int _i1586 = 0; _i1586 < _list1584.size; ++_i1586)
    {
-      _elem1577 = iprot.readString();
-      struct.part_vals.add(_elem1577);
+      _elem1585 = iprot.readString();
+      struct.part_vals.add(_elem1585);
    }
   }
   struct.setPart_valsIsSet(true);
@@ -123378,13 +123519,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi
 }
 if (incoming.get(5)) {
   {
-    org.apache.thrift.protocol.TList _list1579 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.group_names = new ArrayList<String>(_list1579.size);
-    String _elem1580;
-    for (int _i1581 = 0; _i1581 < _list1579.size; ++_i1581)
+    org.apache.thrift.protocol.TList _list1587 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.group_names = new ArrayList<String>(_list1587.size);
+    String _elem1588;
+    for (int _i1589 = 0; _i1589 < _list1587.size; ++_i1589)
    {
-      _elem1580 = iprot.readString();
-      struct.group_names.add(_elem1580);
+      _elem1588 = iprot.readString();
+      struct.group_names.add(_elem1588);
    }
   }
   struct.setGroup_namesIsSet(true);
@@ -123871,14 +124012,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1582 = iprot.readListBegin();
-      struct.success = new ArrayList<Partition>(_list1582.size);
-      Partition _elem1583;
-      for (int _i1584 = 0; _i1584 < _list1582.size; ++_i1584)
+      org.apache.thrift.protocol.TList _list1590 = iprot.readListBegin();
+      struct.success = new ArrayList<Partition>(_list1590.size);
+      Partition _elem1591;
+      for (int _i1592 = 0; _i1592 < _list1590.size; ++_i1592)
       {
-        _elem1583 = new Partition();
-        _elem1583.read(iprot);
-        struct.success.add(_elem1583);
+        _elem1591 = new Partition();
+        _elem1591.read(iprot);
+        struct.success.add(_elem1591);
       }
       iprot.readListEnd();
     }
@@ -123922,9 +124063,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_
 oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-  for (Partition _iter1585 : struct.success)
+  for (Partition _iter1593 : struct.success)
   {
-    _iter1585.write(oprot);
+    _iter1593.write(oprot);
   }
   oprot.writeListEnd();
 }
@@ -123971,9 +124112,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w
 if (struct.isSetSuccess()) {
   {
     oprot.writeI32(struct.success.size());
-    for (Partition _iter1586 : struct.success)
+    for (Partition _iter1594 : struct.success)
    {
-      _iter1586.write(oprot);
+      _iter1594.write(oprot);
    }
   }
 }
@@ -123991,14 +124132,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi
 BitSet incoming = iprot.readBitSet(3);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TList _list1587 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-    struct.success = new ArrayList<Partition>(_list1587.size);
-    Partition _elem1588;
-    for (int _i1589 = 0; _i1589 < _list1587.size; ++_i1589)
+    org.apache.thrift.protocol.TList _list1595 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+    struct.success = new ArrayList<Partition>(_list1595.size);
+    Partition _elem1596;
+    for (int _i1597 = 0; _i1597 < _list1595.size; ++_i1597)
    {
-      _elem1588 = new Partition();
-      _elem1588.read(iprot);
-      struct.success.add(_elem1588);
+      _elem1596 = new Partition();
+      _elem1596.read(iprot);
+      struct.success.add(_elem1596);
    }
   }
   struct.setSuccessIsSet(true);
@@ -124591,13 +124732,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names
 case 3: // PART_VALS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1590 = iprot.readListBegin();
-      struct.part_vals = new ArrayList<String>(_list1590.size);
-      String _elem1591;
-      for (int _i1592 = 0; _i1592 < _list1590.size; ++_i1592)
+      org.apache.thrift.protocol.TList _list1598 = iprot.readListBegin();
+      struct.part_vals = new ArrayList<String>(_list1598.size);
+      String _elem1599;
+      for (int _i1600 = 0; _i1600 < _list1598.size; ++_i1600)
       {
-        _elem1591 = iprot.readString();
-        struct.part_vals.add(_elem1591);
+        _elem1599 = iprot.readString();
+        struct.part_vals.add(_elem1599);
       }
       iprot.readListEnd();
     }
@@ -124641,9 +124782,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name
 oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-  for (String _iter1593 : struct.part_vals)
+  for (String _iter1601 : struct.part_vals)
   {
-    oprot.writeString(_iter1593);
+    oprot.writeString(_iter1601);
   }
   oprot.writeListEnd();
 }
@@ -124692,9 +124833,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names
 if (struct.isSetPart_vals()) {
   {
     oprot.writeI32(struct.part_vals.size());
-    for (String _iter1594 : struct.part_vals)
+    for (String _iter1602 : struct.part_vals)
    {
-      oprot.writeString(_iter1594);
+      oprot.writeString(_iter1602);
    }
   }
 }
@@ -124717,13 +124858,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_
 }
 if (incoming.get(2)) {
   {
-    org.apache.thrift.protocol.TList _list1595 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.part_vals = new ArrayList<String>(_list1595.size);
-    String _elem1596;
-    for (int _i1597 = 0; _i1597 < _list1595.size; ++_i1597)
+    org.apache.thrift.protocol.TList _list1603 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.part_vals = new ArrayList<String>(_list1603.size);
+    String _elem1604;
+    for (int _i1605 = 0; _i1605 < _list1603.size; ++_i1605)
    {
-      _elem1596 = iprot.readString();
-      struct.part_vals.add(_elem1596);
+      _elem1604 = iprot.readString();
+      struct.part_vals.add(_elem1604);
    }
   }
   struct.setPart_valsIsSet(true);
@@ -125211,13 +125352,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1598 = iprot.readListBegin();
-      struct.success = new ArrayList<String>(_list1598.size);
-      String _elem1599;
-      for (int _i1600 = 0; _i1600 < _list1598.size; ++_i1600)
+      org.apache.thrift.protocol.TList _list1606 = iprot.readListBegin();
+      struct.success = new ArrayList<String>(_list1606.size);
+      String _elem1607;
+      for (int _i1608 = 0; _i1608 < _list1606.size; ++_i1608)
       {
-        _elem1599 = iprot.readString();
-        struct.success.add(_elem1599);
+        _elem1607 = iprot.readString();
+        struct.success.add(_elem1607);
       }
       iprot.readListEnd();
     }
@@ -125261,9 +125402,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name
 oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-  for (String _iter1601 : struct.success)
+  for (String _iter1609 : struct.success)
   {
-    oprot.writeString(_iter1601);
+    oprot.writeString(_iter1609);
   }
   oprot.writeListEnd();
 }
@@ -125310,9 +125451,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names
 if (struct.isSetSuccess()) {
   {
     oprot.writeI32(struct.success.size());
-    for (String _iter1602 : struct.success)
+    for (String _iter1610 : struct.success)
    {
-      oprot.writeString(_iter1602);
+      oprot.writeString(_iter1610);
    }
   }
 }
@@ -125330,13 +125471,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_
 BitSet incoming = iprot.readBitSet(3);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TList _list1603 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.success = new ArrayList<String>(_list1603.size);
-    String _elem1604;
-    for (int _i1605 = 0; _i1605 < _list1603.size; ++_i1605)
+    org.apache.thrift.protocol.TList _list1611 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.success = new ArrayList<String>(_list1611.size);
+    String _elem1612;
+    for (int _i1613 = 0; _i1613 < _list1611.size; ++_i1613)
    {
-      _elem1604 = iprot.readString();
-      struct.success.add(_elem1604);
+      _elem1612 = iprot.readString();
+      struct.success.add(_elem1612);
    }
   }
   struct.setSuccessIsSet(true);
@@ -126193,13 +126334,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1606 = iprot.readListBegin();
-      struct.success = new ArrayList<String>(_list1606.size);
-      String _elem1607;
-      for (int _i1608 = 0; _i1608 < _list1606.size; ++_i1608)
+      org.apache.thrift.protocol.TList _list1614 = iprot.readListBegin();
+      struct.success = new ArrayList<String>(_list1614.size);
+      String _elem1615;
+      for (int _i1616 = 0; _i1616 < _list1614.size; ++_i1616)
       {
-        _elem1607 = iprot.readString();
-        struct.success.add(_elem1607);
+        _elem1615 = iprot.readString();
+        struct.success.add(_elem1615);
       }
       iprot.readListEnd();
     }
@@ -126243,9 +126384,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name
 oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-  for (String _iter1609 : struct.success)
+  for (String _iter1617 : struct.success)
   {
-    oprot.writeString(_iter1609);
+    oprot.writeString(_iter1617);
   }
   oprot.writeListEnd();
 }
@@ -126292,9 +126433,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names
 if (struct.isSetSuccess()) {
   {
     oprot.writeI32(struct.success.size());
-    for (String _iter1610 : struct.success)
+    for (String _iter1618 : struct.success)
    {
-      oprot.writeString(_iter1610);
+      oprot.writeString(_iter1618);
    }
   }
 }
@@ -126312,13 +126453,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_
 BitSet incoming = iprot.readBitSet(3);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TList _list1611 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.success = new ArrayList<String>(_list1611.size);
-    String _elem1612;
-    for (int _i1613 = 0; _i1613 < _list1611.size; ++_i1613)
+    org.apache.thrift.protocol.TList _list1619 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.success = new ArrayList<String>(_list1619.size);
+    String _elem1620;
+    for (int _i1621 = 0; _i1621 < _list1619.size; ++_i1621)
    {
-      _elem1612 = iprot.readString();
-      struct.success.add(_elem1612);
+      _elem1620 = iprot.readString();
+      struct.success.add(_elem1620);
    }
   }
   struct.setSuccessIsSet(true);
@@ -127485,14 +127626,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1614 = iprot.readListBegin();
-      struct.success = new ArrayList<Partition>(_list1614.size);
-      Partition _elem1615;
-      for (int _i1616 = 0; _i1616 < _list1614.size; ++_i1616)
+      org.apache.thrift.protocol.TList _list1622 = iprot.readListBegin();
+      struct.success = new ArrayList<Partition>(_list1622.size);
+      Partition _elem1623;
+      for (int _i1624 = 0; _i1624 < _list1622.size; ++_i1624)
       {
-        _elem1615 = new Partition();
-        _elem1615.read(iprot);
-        struct.success.add(_elem1615);
+        _elem1623 = new Partition();
+        _elem1623.read(iprot);
+        struct.success.add(_elem1623);
       }
       iprot.readListEnd();
     }
@@ -127536,9 +127677,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_
 oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-  for (Partition _iter1617 : struct.success)
+  for (Partition _iter1625 : struct.success)
   {
-    _iter1617.write(oprot);
+    _iter1625.write(oprot);
   }
   oprot.writeListEnd();
 }
@@ -127585,9 +127726,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f
 if (struct.isSetSuccess()) {
   {
     oprot.writeI32(struct.success.size());
-    for (Partition _iter1618 : struct.success)
+    for (Partition _iter1626 : struct.success)
    {
-      _iter1618.write(oprot);
+      _iter1626.write(oprot);
    }
   }
 }
@@ -127605,14 +127746,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi
 BitSet incoming = iprot.readBitSet(3);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TList _list1619 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-    struct.success = new ArrayList<Partition>(_list1619.size);
-    Partition _elem1620;
-    for (int _i1621 = 0; _i1621 < _list1619.size; ++_i1621)
+    org.apache.thrift.protocol.TList _list1627 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+    struct.success = new ArrayList<Partition>(_list1627.size);
+    Partition _elem1628;
+    for (int _i1629 = 0; _i1629 < _list1627.size; ++_i1629)
    {
-      _elem1620 = new Partition();
-      _elem1620.read(iprot);
-      struct.success.add(_elem1620);
+      _elem1628 = new Partition();
+      _elem1628.read(iprot);
+      struct.success.add(_elem1628);
    }
   }
   struct.setSuccessIsSet(true);
@@ -128779,14 +128920,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1622 = iprot.readListBegin();
-      struct.success = new ArrayList<PartitionSpec>(_list1622.size);
-      PartitionSpec _elem1623;
-      for (int _i1624 = 0; _i1624 < _list1622.size; ++_i1624)
+      org.apache.thrift.protocol.TList _list1630 = iprot.readListBegin();
+      struct.success = new ArrayList<PartitionSpec>(_list1630.size);
+      PartitionSpec _elem1631;
+      for (int _i1632 = 0; _i1632 < _list1630.size; ++_i1632)
       {
-        _elem1623 = new PartitionSpec();
-        _elem1623.read(iprot);
-        struct.success.add(_elem1623);
+        _elem1631 = new PartitionSpec();
+        _elem1631.read(iprot);
+        struct.success.add(_elem1631);
       }
       iprot.readListEnd();
     }
@@ -128830,9 +128971,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_
 oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-  for (PartitionSpec _iter1625 : struct.success)
+  for (PartitionSpec _iter1633 : struct.success)
   {
-    _iter1625.write(oprot);
+    _iter1633.write(oprot);
   }
   oprot.writeListEnd();
 }
@@ -128879,9 +129020,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f
 if (struct.isSetSuccess()) {
   {
     oprot.writeI32(struct.success.size());
-    for (PartitionSpec _iter1626 : struct.success)
+    for (PartitionSpec _iter1634 : struct.success)
    {
-      _iter1626.write(oprot);
+      _iter1634.write(oprot);
    }
   }
 }
@@ -128899,14 +129040,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi
 BitSet incoming = iprot.readBitSet(3);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TList _list1627 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-    struct.success = new ArrayList<PartitionSpec>(_list1627.size);
-    PartitionSpec _elem1628;
-    for (int _i1629 = 0; _i1629 < _list1627.size; ++_i1629)
+    org.apache.thrift.protocol.TList _list1635 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+    struct.success = new ArrayList<PartitionSpec>(_list1635.size);
+    PartitionSpec _elem1636;
+    for (int _i1637 = 0; _i1637 < _list1635.size; ++_i1637)
    {
-      _elem1628 = new PartitionSpec();
-      _elem1628.read(iprot);
-      struct.success.add(_elem1628);
+      _elem1636 = new PartitionSpec();
+      _elem1636.read(iprot);
+      struct.success.add(_elem1636);
    }
   }
   struct.setSuccessIsSet(true);
@@ -132428,13 +132569,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n
 case 3: // NAMES
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1630 = iprot.readListBegin();
-      struct.names = new ArrayList<String>(_list1630.size);
-      String _elem1631;
-      for (int _i1632 = 0; _i1632 < _list1630.size; ++_i1632)
+      org.apache.thrift.protocol.TList _list1638 = iprot.readListBegin();
+      struct.names = new ArrayList<String>(_list1638.size);
+      String _elem1639;
+      for (int _i1640 = 0; _i1640 < _list1638.size; ++_i1640)
       {
-        _elem1631 = iprot.readString();
-        struct.names.add(_elem1631);
+        _elem1639 = iprot.readString();
+        struct.names.add(_elem1639);
       }
       iprot.readListEnd();
     }
@@ -132470,9 +132611,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_
 oprot.writeFieldBegin(NAMES_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size()));
-  for (String _iter1633 : struct.names)
+  for (String _iter1641 : struct.names)
   {
-    oprot.writeString(_iter1633);
+    oprot.writeString(_iter1641);
   }
   oprot.writeListEnd();
 }
@@ -132515,9 +132656,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n
 if (struct.isSetNames()) {
   {
     oprot.writeI32(struct.names.size());
-    for (String _iter1634 : struct.names)
+    for (String _iter1642 : struct.names)
    {
-      oprot.writeString(_iter1634);
+      oprot.writeString(_iter1642);
    }
   }
 }
@@ -132537,13 +132678,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na
 }
 if (incoming.get(2)) {
   {
-    org.apache.thrift.protocol.TList _list1635 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.names = new ArrayList<String>(_list1635.size);
-    String _elem1636;
-    for (int _i1637 = 0; _i1637 < _list1635.size; ++_i1637)
+    org.apache.thrift.protocol.TList _list1643 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.names = new ArrayList<String>(_list1643.size);
+    String _elem1644;
+    for (int _i1645 = 0; _i1645 < _list1643.size; ++_i1645)
    {
-      _elem1636 = iprot.readString();
-      struct.names.add(_elem1636);
+      _elem1644 = iprot.readString();
+      struct.names.add(_elem1644);
    }
   }
   struct.setNamesIsSet(true);
@@ -133030,14 +133171,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1638 = iprot.readListBegin();
-      struct.success = new ArrayList<Partition>(_list1638.size);
-      Partition _elem1639;
-      for (int _i1640 = 0; _i1640 < _list1638.size; ++_i1640)
+      org.apache.thrift.protocol.TList _list1646 = iprot.readListBegin();
+      struct.success = new ArrayList<Partition>(_list1646.size);
+      Partition _elem1647;
+      for (int _i1648 = 0; _i1648 < _list1646.size; ++_i1648)
       {
-        _elem1639 = new Partition();
-        _elem1639.read(iprot);
-        struct.success.add(_elem1639);
+        _elem1647 = new Partition();
+        _elem1647.read(iprot);
+        struct.success.add(_elem1647);
       }
       iprot.readListEnd();
     }
@@ -133081,9 +133222,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_
 oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-  for (Partition _iter1641 : struct.success)
+  for (Partition _iter1649 : struct.success)
   {
-    _iter1641.write(oprot);
+    _iter1649.write(oprot);
   }
   oprot.writeListEnd();
 }
@@ -133130,9 +133271,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n
 if (struct.isSetSuccess()) {
   {
     oprot.writeI32(struct.success.size());
-    for (Partition _iter1642 : struct.success)
+    for (Partition _iter1650 : struct.success)
    {
-      _iter1642.write(oprot);
+      _iter1650.write(oprot);
    }
   }
 }
@@ -133150,14 +133291,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na
 BitSet incoming = iprot.readBitSet(3);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TList _list1643 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-    struct.success = new ArrayList<Partition>(_list1643.size);
-    Partition _elem1644;
-    for (int _i1645 = 0; _i1645 < _list1643.size; ++_i1645)
+    org.apache.thrift.protocol.TList _list1651 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+    struct.success = new ArrayList<Partition>(_list1651.size);
+    Partition _elem1652;
+    for (int _i1653 = 0; _i1653 < _list1651.size; ++_i1653)
    {
-      _elem1644 = new Partition();
-      _elem1644.read(iprot);
-      struct.success.add(_elem1644);
+      _elem1652 = new Partition();
+      _elem1652.read(iprot);
+      struct.success.add(_elem1652);
    }
   }
   struct.setSuccessIsSet(true);
@@ -135645,14 +135786,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar
 case 3: // NEW_PARTS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1646 = iprot.readListBegin();
-      struct.new_parts = new ArrayList<Partition>(_list1646.size);
-      Partition _elem1647;
-      for (int _i1648 = 0; _i1648 < _list1646.size; ++_i1648)
+      org.apache.thrift.protocol.TList _list1654 = iprot.readListBegin();
+      struct.new_parts = new ArrayList<Partition>(_list1654.size);
+      Partition _elem1655;
+      for (int _i1656 = 0; _i1656 < _list1654.size; ++_i1656)
       {
-        _elem1647 = new Partition();
-        _elem1647.read(iprot);
-        struct.new_parts.add(_elem1647);
+        _elem1655 = new Partition();
+        _elem1655.read(iprot);
+        struct.new_parts.add(_elem1655);
       }
       iprot.readListEnd();
     }
@@ -135688,9 +135829,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a
 oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size()));
-  for (Partition _iter1649 : struct.new_parts)
+  for (Partition _iter1657 : struct.new_parts)
   {
-    _iter1649.write(oprot);
+    _iter1657.write(oprot);
   }
   oprot.writeListEnd();
 }
@@ -135733,9 +135874,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar
 if (struct.isSetNew_parts()) {
   {
     oprot.writeI32(struct.new_parts.size());
-    for (Partition _iter1650 : struct.new_parts)
+    for (Partition _iter1658 : struct.new_parts)
    {
-      _iter1650.write(oprot);
+      _iter1658.write(oprot);
    }
   }
 }
@@ -135755,14 +135896,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg
 }
 if (incoming.get(2)) {
   {
-    org.apache.thrift.protocol.TList _list1651 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-    struct.new_parts = new ArrayList<Partition>(_list1651.size);
-    Partition _elem1652;
-    for (int _i1653 = 0; _i1653 < _list1651.size; ++_i1653)
+    org.apache.thrift.protocol.TList _list1659 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+    struct.new_parts = new ArrayList<Partition>(_list1659.size);
+    Partition _elem1660;
+    for (int _i1661 = 0; _i1661 < _list1659.size; ++_i1661)
    {
-      _elem1652 = new Partition();
-      _elem1652.read(iprot);
-      struct.new_parts.add(_elem1652);
+      _elem1660 = new Partition();
+      _elem1660.read(iprot);
+      struct.new_parts.add(_elem1660);
    }
   }
   struct.setNew_partsIsSet(true);
@@ -136815,14 +136956,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi
 case 3: // NEW_PARTS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1654 = iprot.readListBegin();
-      struct.new_parts = new ArrayList<Partition>(_list1654.size);
-      Partition _elem1655;
-      for (int _i1656 = 0; _i1656 < _list1654.size; ++_i1656)
+      org.apache.thrift.protocol.TList _list1662 = iprot.readListBegin();
+      struct.new_parts = new ArrayList<Partition>(_list1662.size);
+      Partition _elem1663;
+      for (int _i1664 = 0; _i1664 < _list1662.size; ++_i1664)
       {
-        _elem1655 = new Partition();
-        _elem1655.read(iprot);
-        struct.new_parts.add(_elem1655);
+        _elem1663 = new Partition();
+        _elem1663.read(iprot);
+        struct.new_parts.add(_elem1663);
       }
       iprot.readListEnd();
     }
@@ -136867,9 +137008,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w
 oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size()));
-  for (Partition _iter1657 : struct.new_parts)
+  for (Partition _iter1665 : struct.new_parts)
   {
-    _iter1657.write(oprot);
+    _iter1665.write(oprot);
  }
  oprot.writeListEnd();
 }
@@ -136920,9 +137061,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi
 if (struct.isSetNew_parts()) {
   {
     oprot.writeI32(struct.new_parts.size());
-    for (Partition _iter1658 : struct.new_parts)
+    for (Partition _iter1666 : struct.new_parts)
    {
-      _iter1658.write(oprot);
+      _iter1666.write(oprot);
    }
   }
 }
@@ -136945,14 +137086,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit
 }
 if (incoming.get(2)) {
   {
-    org.apache.thrift.protocol.TList _list1659 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-    struct.new_parts = new ArrayList<Partition>(_list1659.size);
-    Partition _elem1660;
-    for (int _i1661 = 0; _i1661 < _list1659.size; ++_i1661)
+    org.apache.thrift.protocol.TList _list1667 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+    struct.new_parts = new ArrayList<Partition>(_list1667.size);
+    Partition _elem1668;
+    for (int _i1669 = 0; _i1669 < _list1667.size; ++_i1669)
    {
-      _elem1660 = new Partition();
-      _elem1660.read(iprot);
-      struct.new_parts.add(_elem1660);
+      _elem1668 = new Partition();
+      _elem1668.read(iprot);
+      struct.new_parts.add(_elem1668);
    }
   }
   struct.setNew_partsIsSet(true);
@@ -140091,13 +140232,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar
 case 3: // PART_VALS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1662 = iprot.readListBegin();
-      struct.part_vals = new ArrayList<String>(_list1662.size);
-      String _elem1663;
-      for (int _i1664 = 0; _i1664 < _list1662.size; ++_i1664)
+      org.apache.thrift.protocol.TList _list1670 = iprot.readListBegin();
+      struct.part_vals = new ArrayList<String>(_list1670.size);
+      String _elem1671;
+      for (int _i1672 = 0; _i1672 < _list1670.size; ++_i1672)
       {
-        _elem1663 = iprot.readString();
-        struct.part_vals.add(_elem1663);
+        _elem1671 = iprot.readString();
+        struct.part_vals.add(_elem1671);
      }
      iprot.readListEnd();
    }
@@ -140142,9 +140283,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a
 oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-  for (String _iter1665 : struct.part_vals)
+  for (String _iter1673 : struct.part_vals)
   {
-    oprot.writeString(_iter1665);
+    oprot.writeString(_iter1673);
   }
   oprot.writeListEnd();
 }
@@ -140195,9 +140336,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar
 if (struct.isSetPart_vals()) {
   {
     oprot.writeI32(struct.part_vals.size());
-    for (String _iter1666 : struct.part_vals)
+    for (String _iter1674 : struct.part_vals)
    {
-      oprot.writeString(_iter1666);
+      oprot.writeString(_iter1674);
    }
   }
 }
@@ -140220,13 +140361,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg
 }
 if (incoming.get(2)) {
   {
-    org.apache.thrift.protocol.TList _list1667 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.part_vals = new ArrayList<String>(_list1667.size);
-    String _elem1668;
-    for (int _i1669 = 0; _i1669 < _list1667.size; ++_i1669)
+    org.apache.thrift.protocol.TList _list1675 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.part_vals = new ArrayList<String>(_list1675.size);
+    String _elem1676;
+    for (int _i1677 = 0; _i1677 < _list1675.size; ++_i1677)
    {
-      _elem1668 = iprot.readString();
-      struct.part_vals.add(_elem1668);
+      _elem1676 = iprot.readString();
+      struct.part_vals.add(_elem1676);
    }
   }
   struct.setPart_valsIsSet(true);
@@ -142038,13 +142179,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_
 case 1: // PART_VALS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1670 = iprot.readListBegin();
-      struct.part_vals = new ArrayList<String>(_list1670.size);
-      String _elem1671;
-      for (int _i1672 = 0; _i1672 < _list1670.size; ++_i1672)
+      org.apache.thrift.protocol.TList _list1678 = iprot.readListBegin();
+      struct.part_vals = new ArrayList<String>(_list1678.size);
+      String _elem1679;
+      for (int _i1680 = 0; _i1680 < _list1678.size; ++_i1680)
       {
-        _elem1671 = iprot.readString();
-        struct.part_vals.add(_elem1671);
+        _elem1679 = iprot.readString();
+        struct.part_vals.add(_elem1679);
      }
      iprot.readListEnd();
    }
@@ -142078,9 +142219,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has
 oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-  for (String _iter1673 : struct.part_vals)
+  for (String _iter1681 : struct.part_vals)
   {
-    oprot.writeString(_iter1673);
+    oprot.writeString(_iter1681);
   }
   oprot.writeListEnd();
 }
@@ -142117,9 +142258,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_
 if (struct.isSetPart_vals()) {
   {
     oprot.writeI32(struct.part_vals.size());
-    for (String _iter1674 : struct.part_vals)
+    for (String _iter1682 : struct.part_vals)
    {
-      oprot.writeString(_iter1674);
+      oprot.writeString(_iter1682);
    }
   }
 }
@@ -142134,13 +142275,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v
 BitSet incoming = iprot.readBitSet(2);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TList _list1675 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.part_vals = new ArrayList<String>(_list1675.size);
-    String _elem1676;
-    for (int _i1677 = 0; _i1677 < _list1675.size; ++_i1677)
+    org.apache.thrift.protocol.TList _list1683 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.part_vals = new ArrayList<String>(_list1683.size);
+    String _elem1684;
+    for (int _i1685 = 0; _i1685 < _list1683.size; ++_i1685)
    {
-      _elem1676 = iprot.readString();
-      struct.part_vals.add(_elem1676);
+      _elem1684 = iprot.readString();
+      struct.part_vals.add(_elem1684);
    }
   }
   struct.setPart_valsIsSet(true);
@@ -144295,13 +144436,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
     {
-      org.apache.thrift.protocol.TList _list1678 = iprot.readListBegin();
-      struct.success = new ArrayList<String>(_list1678.size);
-      String _elem1679;
-      for (int _i1680 = 0; _i1680 < _list1678.size; ++_i1680)
+      org.apache.thrift.protocol.TList _list1686 = iprot.readListBegin();
+      struct.success = new ArrayList<String>(_list1686.size);
+      String _elem1687;
+      for (int _i1688 = 0; _i1688 < _list1686.size; ++_i1688)
       {
-        _elem1679 = iprot.readString();
-        struct.success.add(_elem1679);
+        _elem1687 = iprot.readString();
+        struct.success.add(_elem1687);
      }
      iprot.readListEnd();
    }
@@ -144336,9 +144477,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_
 oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
 {
   oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-  for (String _iter1681 : struct.success)
+  for (String _iter1689 : struct.success)
   {
-    oprot.writeString(_iter1681);
+    oprot.writeString(_iter1689);
   }
   oprot.writeListEnd();
 }
@@ -144377,9 +144518,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v
 if (struct.isSetSuccess()) {
   {
     oprot.writeI32(struct.success.size());
-    for (String _iter1682 : struct.success)
+    for (String _iter1690 : struct.success)
    {
-      oprot.writeString(_iter1682);
+      oprot.writeString(_iter1690);
    }
   }
 }
@@ -144394,13 +144535,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va
 BitSet incoming = iprot.readBitSet(2);
 if (incoming.get(0)) {
   {
-    org.apache.thrift.protocol.TList _list1683 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-    struct.success = new ArrayList<String>(_list1683.size);
-    String _elem1684;
-    for (int _i1685 = 0; _i1685 < _list1683.size; ++_i1685)
+    org.apache.thrift.protocol.TList _list1691 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+    struct.success = new ArrayList<String>(_list1691.size);
+    String _elem1692;
+    for (int _i1693 = 0; _i1693 < _list1691.size; ++_i1693)
    {
-      _elem1684 = iprot.readString();
-      struct.success.add(_elem1684);
+      _elem1692 = iprot.readString();
+      struct.success.add(_elem1692);
    }
   }
   struct.setSuccessIsSet(true);
@@ -145163,15 +145304,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
     {
-      org.apache.thrift.protocol.TMap _map1686 = iprot.readMapBegin();
-      struct.success = new HashMap<String,String>(2*_map1686.size);
-      String _key1687;
-      String _val1688;
-      for (int _i1689 = 0; _i1689 < _map1686.size; ++_i1689)
+      org.apache.thrift.protocol.TMap _map1694 = iprot.readMapBegin();
+      struct.success = new HashMap<String,String>(2*_map1694.size);
+      String _key1695;
+      String _val1696;
+      for (int _i1697 = 0; _i1697 < _map1694.size; ++_i1697)
       {
-        _key1687 = iprot.readString();
-        _val1688 = iprot.readString();
-        struct.success.put(_key1687, _val1688);
+        _key1695 = iprot.readString();
+        _val1696 = iprot.readString();
+        struct.success.put(_key1695, _val1696);
      }
      iprot.readMapEnd();
    }
@@ -145206,10 +145347,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_
 oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
 {
   oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-  for (Map.Entry<String, String> _iter1690 : struct.success.entrySet())
+  for (Map.Entry<String, String> _iter1698 : struct.success.entrySet())
  {
-    oprot.writeString(_iter1690.getKey());
-    oprot.writeString(_iter1690.getValue());
+    oprot.writeString(_iter1698.getKey());
+    oprot.writeString(_iter1698.getValue());
  }
  oprot.writeMapEnd();
 }
@@ -145248,10 +145389,10 @@ public void
write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1691 : struct.success.entrySet()) + for (Map.Entry _iter1699 : struct.success.entrySet()) { - oprot.writeString(_iter1691.getKey()); - oprot.writeString(_iter1691.getValue()); + oprot.writeString(_iter1699.getKey()); + oprot.writeString(_iter1699.getValue()); } } } @@ -145266,15 +145407,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1692 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1692.size); - String _key1693; - String _val1694; - for (int _i1695 = 0; _i1695 < _map1692.size; ++_i1695) + org.apache.thrift.protocol.TMap _map1700 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1700.size); + String _key1701; + String _val1702; + for (int _i1703 = 0; _i1703 < _map1700.size; ++_i1703) { - _key1693 = iprot.readString(); - _val1694 = iprot.readString(); - struct.success.put(_key1693, _val1694); + _key1701 = iprot.readString(); + _val1702 = iprot.readString(); + struct.success.put(_key1701, _val1702); } } struct.setSuccessIsSet(true); @@ -145869,15 +146010,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1696 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1696.size); - String _key1697; - String _val1698; - for (int _i1699 = 0; _i1699 < _map1696.size; ++_i1699) + org.apache.thrift.protocol.TMap _map1704 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1704.size); + String _key1705; + String _val1706; + for (int _i1707 = 0; _i1707 < _map1704.size; ++_i1707) { - _key1697 = iprot.readString(); - _val1698 = iprot.readString(); - struct.part_vals.put(_key1697, _val1698); + _key1705 = iprot.readString(); + _val1706 = iprot.readString(); + struct.part_vals.put(_key1705, _val1706); } iprot.readMapEnd(); } @@ -145921,10 +146062,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1700 : struct.part_vals.entrySet()) + for (Map.Entry _iter1708 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1700.getKey()); - oprot.writeString(_iter1700.getValue()); + oprot.writeString(_iter1708.getKey()); + oprot.writeString(_iter1708.getValue()); } oprot.writeMapEnd(); } @@ -145975,10 +146116,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1701 : struct.part_vals.entrySet()) + for (Map.Entry _iter1709 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1701.getKey()); - oprot.writeString(_iter1701.getValue()); + oprot.writeString(_iter1709.getKey()); + oprot.writeString(_iter1709.getValue()); } } } @@ -146001,15 +146142,15 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1702 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1702.size); - String _key1703; - String _val1704; - for (int _i1705 = 0; _i1705 < _map1702.size; ++_i1705) + org.apache.thrift.protocol.TMap _map1710 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1710.size); + String _key1711; + String _val1712; + for (int _i1713 = 0; _i1713 < _map1710.size; ++_i1713) { - _key1703 = iprot.readString(); - _val1704 = iprot.readString(); - struct.part_vals.put(_key1703, _val1704); + _key1711 = iprot.readString(); + _val1712 = iprot.readString(); + struct.part_vals.put(_key1711, _val1712); } } struct.setPart_valsIsSet(true); @@ -147493,15 +147634,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1706 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1706.size); - String _key1707; - String _val1708; - for (int _i1709 = 0; _i1709 < _map1706.size; ++_i1709) + org.apache.thrift.protocol.TMap _map1714 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1714.size); + String _key1715; + String _val1716; + for (int _i1717 = 0; _i1717 < _map1714.size; ++_i1717) { - _key1707 = iprot.readString(); - _val1708 = iprot.readString(); - struct.part_vals.put(_key1707, _val1708); + _key1715 = iprot.readString(); + _val1716 = iprot.readString(); + struct.part_vals.put(_key1715, _val1716); } iprot.readMapEnd(); } @@ -147545,10 +147686,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1710 : struct.part_vals.entrySet()) + for (Map.Entry _iter1718 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1710.getKey()); - oprot.writeString(_iter1710.getValue()); + oprot.writeString(_iter1718.getKey()); + oprot.writeString(_iter1718.getValue()); } oprot.writeMapEnd(); } @@ -147599,10 +147740,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1711 : struct.part_vals.entrySet()) + for (Map.Entry _iter1719 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1711.getKey()); - oprot.writeString(_iter1711.getValue()); + oprot.writeString(_iter1719.getKey()); + oprot.writeString(_iter1719.getValue()); } } } @@ -147625,15 +147766,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1712 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1712.size); - String _key1713; - String _val1714; - for (int _i1715 = 0; _i1715 < _map1712.size; ++_i1715) + org.apache.thrift.protocol.TMap _map1720 = new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1720.size); + String _key1721; + String _val1722; + for (int _i1723 = 0; _i1723 < _map1720.size; ++_i1723) { - _key1713 = iprot.readString(); - _val1714 = iprot.readString(); - struct.part_vals.put(_key1713, _val1714); + _key1721 = iprot.readString(); + _val1722 = iprot.readString(); + struct.part_vals.put(_key1721, _val1722); } } struct.setPart_valsIsSet(true); @@ -172497,13 +172638,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1716 = iprot.readListBegin(); - struct.success = new ArrayList(_list1716.size); - String _elem1717; - for (int _i1718 = 0; _i1718 < _list1716.size; ++_i1718) + org.apache.thrift.protocol.TList _list1724 = iprot.readListBegin(); + struct.success = new ArrayList(_list1724.size); + String _elem1725; + for (int _i1726 = 0; _i1726 < _list1724.size; ++_i1726) { - _elem1717 = iprot.readString(); - struct.success.add(_elem1717); + _elem1725 = iprot.readString(); + struct.success.add(_elem1725); } iprot.readListEnd(); } @@ -172538,9 +172679,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1719 : struct.success) + for (String _iter1727 : struct.success) { - oprot.writeString(_iter1719); + oprot.writeString(_iter1727); } oprot.writeListEnd(); } @@ -172579,9 +172720,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1720 : struct.success) + for (String _iter1728 : struct.success) { - oprot.writeString(_iter1720); + oprot.writeString(_iter1728); } } } @@ -172596,13 +172737,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1721 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1721.size); - String _elem1722; - for (int _i1723 = 0; _i1723 < _list1721.size; ++_i1723) + org.apache.thrift.protocol.TList _list1729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1729.size); + String _elem1730; + for (int _i1731 = 0; _i1731 < _list1729.size; ++_i1731) { - _elem1722 = iprot.readString(); - struct.success.add(_elem1722); + _elem1730 = iprot.readString(); + struct.success.add(_elem1730); } } struct.setSuccessIsSet(true); @@ -176657,13 +176798,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1724 = iprot.readListBegin(); - struct.success = new ArrayList(_list1724.size); - String _elem1725; - for (int _i1726 = 0; _i1726 < _list1724.size; ++_i1726) + org.apache.thrift.protocol.TList _list1732 = iprot.readListBegin(); + struct.success = new ArrayList(_list1732.size); + String _elem1733; + for (int _i1734 = 0; _i1734 < _list1732.size; ++_i1734) { - 
_elem1725 = iprot.readString(); - struct.success.add(_elem1725); + _elem1733 = iprot.readString(); + struct.success.add(_elem1733); } iprot.readListEnd(); } @@ -176698,9 +176839,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1727 : struct.success) + for (String _iter1735 : struct.success) { - oprot.writeString(_iter1727); + oprot.writeString(_iter1735); } oprot.writeListEnd(); } @@ -176739,9 +176880,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1728 : struct.success) + for (String _iter1736 : struct.success) { - oprot.writeString(_iter1728); + oprot.writeString(_iter1736); } } } @@ -176756,13 +176897,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1729.size); - String _elem1730; - for (int _i1731 = 0; _i1731 < _list1729.size; ++_i1731) + org.apache.thrift.protocol.TList _list1737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1737.size); + String _elem1738; + for (int _i1739 = 0; _i1739 < _list1737.size; ++_i1739) { - _elem1730 = iprot.readString(); - struct.success.add(_elem1730); + _elem1738 = iprot.readString(); + struct.success.add(_elem1738); } } struct.setSuccessIsSet(true); @@ -180053,14 +180194,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1732 = iprot.readListBegin(); - struct.success = new ArrayList(_list1732.size); - Role _elem1733; - for (int _i1734 = 0; _i1734 < _list1732.size; ++_i1734) + org.apache.thrift.protocol.TList _list1740 = iprot.readListBegin(); + struct.success = new ArrayList(_list1740.size); + Role _elem1741; + for (int _i1742 = 0; _i1742 < _list1740.size; ++_i1742) { - _elem1733 = new Role(); - _elem1733.read(iprot); - struct.success.add(_elem1733); + _elem1741 = new Role(); + _elem1741.read(iprot); + struct.success.add(_elem1741); } iprot.readListEnd(); } @@ -180095,9 +180236,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1735 : struct.success) + for (Role _iter1743 : struct.success) { - _iter1735.write(oprot); + _iter1743.write(oprot); } oprot.writeListEnd(); } @@ -180136,9 +180277,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1736 : struct.success) + for (Role _iter1744 : struct.success) { - _iter1736.write(oprot); + _iter1744.write(oprot); } } } @@ -180153,14 +180294,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - 
org.apache.thrift.protocol.TList _list1737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1737.size); - Role _elem1738; - for (int _i1739 = 0; _i1739 < _list1737.size; ++_i1739) + org.apache.thrift.protocol.TList _list1745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1745.size); + Role _elem1746; + for (int _i1747 = 0; _i1747 < _list1745.size; ++_i1747) { - _elem1738 = new Role(); - _elem1738.read(iprot); - struct.success.add(_elem1738); + _elem1746 = new Role(); + _elem1746.read(iprot); + struct.success.add(_elem1746); } } struct.setSuccessIsSet(true); @@ -183165,13 +183306,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1740 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1740.size); - String _elem1741; - for (int _i1742 = 0; _i1742 < _list1740.size; ++_i1742) + org.apache.thrift.protocol.TList _list1748 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1748.size); + String _elem1749; + for (int _i1750 = 0; _i1750 < _list1748.size; ++_i1750) { - _elem1741 = iprot.readString(); - struct.group_names.add(_elem1741); + _elem1749 = iprot.readString(); + struct.group_names.add(_elem1749); } iprot.readListEnd(); } @@ -183207,9 +183348,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1743 : struct.group_names) + for (String _iter1751 : struct.group_names) { - oprot.writeString(_iter1743); + oprot.writeString(_iter1751); } oprot.writeListEnd(); } @@ -183252,9 +183393,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1744 : struct.group_names) + for (String _iter1752 : struct.group_names) { - oprot.writeString(_iter1744); + oprot.writeString(_iter1752); } } } @@ -183275,13 +183416,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1745.size); - String _elem1746; - for (int _i1747 = 0; _i1747 < _list1745.size; ++_i1747) + org.apache.thrift.protocol.TList _list1753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1753.size); + String _elem1754; + for (int _i1755 = 0; _i1755 < _list1753.size; ++_i1755) { - _elem1746 = iprot.readString(); - struct.group_names.add(_elem1746); + _elem1754 = iprot.readString(); + struct.group_names.add(_elem1754); } } struct.setGroup_namesIsSet(true); @@ -184739,14 +184880,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1748 = iprot.readListBegin(); - struct.success = new ArrayList(_list1748.size); - HiveObjectPrivilege _elem1749; - for 
(int _i1750 = 0; _i1750 < _list1748.size; ++_i1750) + org.apache.thrift.protocol.TList _list1756 = iprot.readListBegin(); + struct.success = new ArrayList(_list1756.size); + HiveObjectPrivilege _elem1757; + for (int _i1758 = 0; _i1758 < _list1756.size; ++_i1758) { - _elem1749 = new HiveObjectPrivilege(); - _elem1749.read(iprot); - struct.success.add(_elem1749); + _elem1757 = new HiveObjectPrivilege(); + _elem1757.read(iprot); + struct.success.add(_elem1757); } iprot.readListEnd(); } @@ -184781,9 +184922,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1751 : struct.success) + for (HiveObjectPrivilege _iter1759 : struct.success) { - _iter1751.write(oprot); + _iter1759.write(oprot); } oprot.writeListEnd(); } @@ -184822,9 +184963,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1752 : struct.success) + for (HiveObjectPrivilege _iter1760 : struct.success) { - _iter1752.write(oprot); + _iter1760.write(oprot); } } } @@ -184839,14 +184980,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1753.size); - HiveObjectPrivilege _elem1754; - for (int _i1755 = 0; _i1755 < _list1753.size; ++_i1755) + org.apache.thrift.protocol.TList _list1761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1761.size); + HiveObjectPrivilege _elem1762; + for (int _i1763 = 0; _i1763 < _list1761.size; ++_i1763) { - _elem1754 = new HiveObjectPrivilege(); - _elem1754.read(iprot); - struct.success.add(_elem1754); + _elem1762 = new HiveObjectPrivilege(); + _elem1762.read(iprot); + struct.success.add(_elem1762); } } struct.setSuccessIsSet(true); @@ -188793,13 +188934,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1756 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1756.size); - String _elem1757; - for (int _i1758 = 0; _i1758 < _list1756.size; ++_i1758) + org.apache.thrift.protocol.TList _list1764 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1764.size); + String _elem1765; + for (int _i1766 = 0; _i1766 < _list1764.size; ++_i1766) { - _elem1757 = iprot.readString(); - struct.group_names.add(_elem1757); + _elem1765 = iprot.readString(); + struct.group_names.add(_elem1765); } iprot.readListEnd(); } @@ -188830,9 +188971,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1759 : struct.group_names) + for (String _iter1767 : struct.group_names) { - oprot.writeString(_iter1759); + oprot.writeString(_iter1767); } oprot.writeListEnd(); } @@ -188869,9 +189010,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1760 : struct.group_names) + for (String _iter1768 : struct.group_names) { - oprot.writeString(_iter1760); + oprot.writeString(_iter1768); } } } @@ -188887,13 +189028,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1761.size); - String _elem1762; - for (int _i1763 = 0; _i1763 < _list1761.size; ++_i1763) + org.apache.thrift.protocol.TList _list1769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1769.size); + String _elem1770; + for (int _i1771 = 0; _i1771 < _list1769.size; ++_i1771) { - _elem1762 = iprot.readString(); - struct.group_names.add(_elem1762); + _elem1770 = iprot.readString(); + struct.group_names.add(_elem1770); } } struct.setGroup_namesIsSet(true); @@ -189296,13 +189437,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1764 = iprot.readListBegin(); - struct.success = new ArrayList(_list1764.size); - String _elem1765; - for (int _i1766 = 0; _i1766 < _list1764.size; ++_i1766) + org.apache.thrift.protocol.TList _list1772 = iprot.readListBegin(); + struct.success = new ArrayList(_list1772.size); + String _elem1773; + for (int _i1774 = 0; _i1774 < _list1772.size; ++_i1774) { - _elem1765 = iprot.readString(); - struct.success.add(_elem1765); + _elem1773 = iprot.readString(); + struct.success.add(_elem1773); } iprot.readListEnd(); } @@ -189337,9 +189478,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1767 : struct.success) + for (String _iter1775 : struct.success) { - oprot.writeString(_iter1767); + oprot.writeString(_iter1775); } oprot.writeListEnd(); } @@ -189378,9 +189519,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1768 : struct.success) + for (String _iter1776 : struct.success) { - oprot.writeString(_iter1768); + oprot.writeString(_iter1776); } } } @@ -189395,13 +189536,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1769.size); - String _elem1770; - for (int _i1771 = 0; _i1771 < _list1769.size; ++_i1771) + org.apache.thrift.protocol.TList _list1777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1777.size); + String _elem1778; + for (int _i1779 = 0; _i1779 < _list1777.size; ++_i1779) { - _elem1770 = iprot.readString(); - struct.success.add(_elem1770); + _elem1778 = iprot.readString(); + 
struct.success.add(_elem1778); } } struct.setSuccessIsSet(true); @@ -194692,13 +194833,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1772 = iprot.readListBegin(); - struct.success = new ArrayList(_list1772.size); - String _elem1773; - for (int _i1774 = 0; _i1774 < _list1772.size; ++_i1774) + org.apache.thrift.protocol.TList _list1780 = iprot.readListBegin(); + struct.success = new ArrayList(_list1780.size); + String _elem1781; + for (int _i1782 = 0; _i1782 < _list1780.size; ++_i1782) { - _elem1773 = iprot.readString(); - struct.success.add(_elem1773); + _elem1781 = iprot.readString(); + struct.success.add(_elem1781); } iprot.readListEnd(); } @@ -194724,9 +194865,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1775 : struct.success) + for (String _iter1783 : struct.success) { - oprot.writeString(_iter1775); + oprot.writeString(_iter1783); } oprot.writeListEnd(); } @@ -194757,9 +194898,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1776 : struct.success) + for (String _iter1784 : struct.success) { - oprot.writeString(_iter1776); + oprot.writeString(_iter1784); } } } @@ -194771,13 +194912,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1777.size); - String _elem1778; - for (int _i1779 = 0; _i1779 < _list1777.size; ++_i1779) + org.apache.thrift.protocol.TList _list1785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1785.size); + String _elem1786; + for (int _i1787 = 0; _i1787 < _list1785.size; ++_i1787) { - _elem1778 = iprot.readString(); - struct.success.add(_elem1778); + _elem1786 = iprot.readString(); + struct.success.add(_elem1786); } } struct.setSuccessIsSet(true); @@ -197807,13 +197948,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1780 = iprot.readListBegin(); - struct.success = new ArrayList(_list1780.size); - String _elem1781; - for (int _i1782 = 0; _i1782 < _list1780.size; ++_i1782) + org.apache.thrift.protocol.TList _list1788 = iprot.readListBegin(); + struct.success = new ArrayList(_list1788.size); + String _elem1789; + for (int _i1790 = 0; _i1790 < _list1788.size; ++_i1790) { - _elem1781 = iprot.readString(); - struct.success.add(_elem1781); + _elem1789 = iprot.readString(); + struct.success.add(_elem1789); } iprot.readListEnd(); } @@ -197839,9 +197980,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1783 : 
struct.success) + for (String _iter1791 : struct.success) { - oprot.writeString(_iter1783); + oprot.writeString(_iter1791); } oprot.writeListEnd(); } @@ -197872,9 +198013,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1784 : struct.success) + for (String _iter1792 : struct.success) { - oprot.writeString(_iter1784); + oprot.writeString(_iter1792); } } } @@ -197886,13 +198027,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1785.size); - String _elem1786; - for (int _i1787 = 0; _i1787 < _list1785.size; ++_i1787) + org.apache.thrift.protocol.TList _list1793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1793.size); + String _elem1794; + for (int _i1795 = 0; _i1795 < _list1793.size; ++_i1795) { - _elem1786 = iprot.readString(); - struct.success.add(_elem1786); + _elem1794 = iprot.readString(); + struct.success.add(_elem1794); } } struct.setSuccessIsSet(true); @@ -215013,13 +215154,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, find_columns_with_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1788 = iprot.readListBegin(); - struct.success = new ArrayList(_list1788.size); - String _elem1789; - for (int _i1790 = 0; _i1790 < _list1788.size; ++_i1790) + org.apache.thrift.protocol.TList _list1796 = iprot.readListBegin(); + struct.success = new ArrayList(_list1796.size); + String _elem1797; + for (int _i1798 = 0; _i1798 < _list1796.size; ++_i1798) { - _elem1789 = iprot.readString(); - struct.success.add(_elem1789); + _elem1797 = iprot.readString(); + struct.success.add(_elem1797); } iprot.readListEnd(); } @@ -215045,9 +215186,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, find_columns_with_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1791 : struct.success) + for (String _iter1799 : struct.success) { - oprot.writeString(_iter1791); + oprot.writeString(_iter1799); } oprot.writeListEnd(); } @@ -215078,9 +215219,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, find_columns_with_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1792 : struct.success) + for (String _iter1800 : struct.success) { - oprot.writeString(_iter1792); + oprot.writeString(_iter1800); } } } @@ -215092,13 +215233,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, find_columns_with_st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1793.size); - String _elem1794; - for (int _i1795 = 0; _i1795 < _list1793.size; ++_i1795) + org.apache.thrift.protocol.TList _list1801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1801.size); + String 
_elem1802; + for (int _i1803 = 0; _i1803 < _list1801.size; ++_i1803) { - _elem1794 = iprot.readString(); - struct.success.add(_elem1794); + _elem1802 = iprot.readString(); + struct.success.add(_elem1802); } } struct.setSuccessIsSet(true); @@ -251984,14 +252125,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_all_vers case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1796 = iprot.readListBegin(); - struct.success = new ArrayList(_list1796.size); - SchemaVersion _elem1797; - for (int _i1798 = 0; _i1798 < _list1796.size; ++_i1798) + org.apache.thrift.protocol.TList _list1804 = iprot.readListBegin(); + struct.success = new ArrayList(_list1804.size); + SchemaVersion _elem1805; + for (int _i1806 = 0; _i1806 < _list1804.size; ++_i1806) { - _elem1797 = new SchemaVersion(); - _elem1797.read(iprot); - struct.success.add(_elem1797); + _elem1805 = new SchemaVersion(); + _elem1805.read(iprot); + struct.success.add(_elem1805); } iprot.readListEnd(); } @@ -252035,9 +252176,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_all_ver oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (SchemaVersion _iter1799 : struct.success) + for (SchemaVersion _iter1807 : struct.success) { - _iter1799.write(oprot); + _iter1807.write(oprot); } oprot.writeListEnd(); } @@ -252084,9 +252225,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_all_vers if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (SchemaVersion _iter1800 : struct.success) + for (SchemaVersion _iter1808 : struct.success) { - _iter1800.write(oprot); + _iter1808.write(oprot); } } } @@ -252104,14 +252245,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_all_versi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1801.size); - SchemaVersion _elem1802; - for (int _i1803 = 0; _i1803 < _list1801.size; ++_i1803) + org.apache.thrift.protocol.TList _list1809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1809.size); + SchemaVersion _elem1810; + for (int _i1811 = 0; _i1811 < _list1809.size; ++_i1811) { - _elem1802 = new SchemaVersion(); - _elem1802.read(iprot); - struct.success.add(_elem1802); + _elem1810 = new SchemaVersion(); + _elem1810.read(iprot); + struct.success.add(_elem1810); } } struct.setSuccessIsSet(true); @@ -260654,14 +260795,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1804 = iprot.readListBegin(); - struct.success = new ArrayList(_list1804.size); - RuntimeStat _elem1805; - for (int _i1806 = 0; _i1806 < _list1804.size; ++_i1806) + org.apache.thrift.protocol.TList _list1812 = iprot.readListBegin(); + struct.success = new ArrayList(_list1812.size); + RuntimeStat _elem1813; + for (int _i1814 = 0; _i1814 < _list1812.size; ++_i1814) { - _elem1805 = new RuntimeStat(); - _elem1805.read(iprot); - struct.success.add(_elem1805); + _elem1813 = new RuntimeStat(); + _elem1813.read(iprot); + 
struct.success.add(_elem1813); } iprot.readListEnd(); } @@ -260696,9 +260837,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (RuntimeStat _iter1807 : struct.success) + for (RuntimeStat _iter1815 : struct.success) { - _iter1807.write(oprot); + _iter1815.write(oprot); } oprot.writeListEnd(); } @@ -260737,9 +260878,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (RuntimeStat _iter1808 : struct.success) + for (RuntimeStat _iter1816 : struct.success) { - _iter1808.write(oprot); + _iter1816.write(oprot); } } } @@ -260754,14 +260895,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1809.size); - RuntimeStat _elem1810; - for (int _i1811 = 0; _i1811 < _list1809.size; ++_i1811) + org.apache.thrift.protocol.TList _list1817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1817.size); + RuntimeStat _elem1818; + for (int _i1819 = 0; _i1819 < _list1817.size; ++_i1819) { - _elem1810 = new RuntimeStat(); - _elem1810.read(iprot); - struct.success.add(_elem1810); + _elem1818 = new RuntimeStat(); + _elem1818.read(iprot); + struct.success.add(_elem1818); } } struct.setSuccessIsSet(true); @@ -265248,4 +265389,727 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_scheduled_query_ } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_replication_metrics_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_replication_metrics_args"); + + private static final org.apache.thrift.protocol.TField REPLICATION_METRIC_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("replicationMetricList", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new add_replication_metrics_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new add_replication_metrics_argsTupleSchemeFactory()); + } + + private ReplicationMetricList replicationMetricList; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REPLICATION_METRIC_LIST((short)1, "replicationMetricList"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REPLICATION_METRIC_LIST + return REPLICATION_METRIC_LIST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REPLICATION_METRIC_LIST, new org.apache.thrift.meta_data.FieldMetaData("replicationMetricList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ReplicationMetricList.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_replication_metrics_args.class, metaDataMap); + } + + public add_replication_metrics_args() { + } + + public add_replication_metrics_args( + ReplicationMetricList replicationMetricList) + { + this(); + this.replicationMetricList = replicationMetricList; + } + + /** + * Performs a deep copy on other. 
+ */ + public add_replication_metrics_args(add_replication_metrics_args other) { + if (other.isSetReplicationMetricList()) { + this.replicationMetricList = new ReplicationMetricList(other.replicationMetricList); + } + } + + public add_replication_metrics_args deepCopy() { + return new add_replication_metrics_args(this); + } + + @Override + public void clear() { + this.replicationMetricList = null; + } + + public ReplicationMetricList getReplicationMetricList() { + return this.replicationMetricList; + } + + public void setReplicationMetricList(ReplicationMetricList replicationMetricList) { + this.replicationMetricList = replicationMetricList; + } + + public void unsetReplicationMetricList() { + this.replicationMetricList = null; + } + + /** Returns true if field replicationMetricList is set (has been assigned a value) and false otherwise */ + public boolean isSetReplicationMetricList() { + return this.replicationMetricList != null; + } + + public void setReplicationMetricListIsSet(boolean value) { + if (!value) { + this.replicationMetricList = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REPLICATION_METRIC_LIST: + if (value == null) { + unsetReplicationMetricList(); + } else { + setReplicationMetricList((ReplicationMetricList)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REPLICATION_METRIC_LIST: + return getReplicationMetricList(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REPLICATION_METRIC_LIST: + return isSetReplicationMetricList(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof add_replication_metrics_args) + return this.equals((add_replication_metrics_args)that); + return false; + } + + public boolean equals(add_replication_metrics_args that) { + if (that == null) + return false; + + boolean this_present_replicationMetricList = true && this.isSetReplicationMetricList(); + boolean that_present_replicationMetricList = true && that.isSetReplicationMetricList(); + if (this_present_replicationMetricList || that_present_replicationMetricList) { + if (!(this_present_replicationMetricList && that_present_replicationMetricList)) + return false; + if (!this.replicationMetricList.equals(that.replicationMetricList)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_replicationMetricList = true && (isSetReplicationMetricList()); + list.add(present_replicationMetricList); + if (present_replicationMetricList) + list.add(replicationMetricList); + + return list.hashCode(); + } + + @Override + public int compareTo(add_replication_metrics_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetReplicationMetricList()).compareTo(other.isSetReplicationMetricList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReplicationMetricList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.replicationMetricList, other.replicationMetricList); + if (lastComparison != 0) { + return 
lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("add_replication_metrics_args("); + boolean first = true; + + sb.append("replicationMetricList:"); + if (this.replicationMetricList == null) { + sb.append("null"); + } else { + sb.append(this.replicationMetricList); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (replicationMetricList != null) { + replicationMetricList.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class add_replication_metrics_argsStandardSchemeFactory implements SchemeFactory { + public add_replication_metrics_argsStandardScheme getScheme() { + return new add_replication_metrics_argsStandardScheme(); + } + } + + private static class add_replication_metrics_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, add_replication_metrics_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REPLICATION_METRIC_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.replicationMetricList = new ReplicationMetricList(); + struct.replicationMetricList.read(iprot); + struct.setReplicationMetricListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, add_replication_metrics_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.replicationMetricList != null) { + oprot.writeFieldBegin(REPLICATION_METRIC_LIST_FIELD_DESC); + struct.replicationMetricList.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class add_replication_metrics_argsTupleSchemeFactory implements SchemeFactory { + public add_replication_metrics_argsTupleScheme getScheme() { + return new 
add_replication_metrics_argsTupleScheme(); + } + } + + private static class add_replication_metrics_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, add_replication_metrics_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetReplicationMetricList()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetReplicationMetricList()) { + struct.replicationMetricList.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, add_replication_metrics_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.replicationMetricList = new ReplicationMetricList(); + struct.replicationMetricList.read(iprot); + struct.setReplicationMetricListIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_replication_metrics_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_replication_metrics_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new add_replication_metrics_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new add_replication_metrics_resultTupleSchemeFactory()); + } + + private MetaException o1; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_replication_metrics_result implements org.apache.thrift.TBase<add_replication_metrics_result, add_replication_metrics_result._Fields>, java.io.Serializable, Cloneable, Comparable<add_replication_metrics_result>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_replication_metrics_result");
+
+    private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new add_replication_metrics_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new add_replication_metrics_resultTupleSchemeFactory());
+    }
+
+    private MetaException o1; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      O1((short)1, "o1");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if its not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // O1
+            return O1;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if its not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT,
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_replication_metrics_result.class, metaDataMap);
+    }
+
+    public add_replication_metrics_result() {
+    }
+
+    public add_replication_metrics_result(
+      MetaException o1)
+    {
+      this();
+      this.o1 = o1;
+    }
+
+    /**
+     * Performs a deep copy on other.
+     */
+    public add_replication_metrics_result(add_replication_metrics_result other) {
+      if (other.isSetO1()) {
+        this.o1 = new MetaException(other.o1);
+      }
+    }
+
+    public add_replication_metrics_result deepCopy() {
+      return new add_replication_metrics_result(this);
+    }
+
+    @Override
+    public void clear() {
+      this.o1 = null;
+    }
+
+    public MetaException getO1() {
+      return this.o1;
+    }
+
+    public void setO1(MetaException o1) {
+      this.o1 = o1;
+    }
+
+    public void unsetO1() {
+      this.o1 = null;
+    }
+
+    /** Returns true if field o1 is set (has been assigned a value) and false otherwise */
+    public boolean isSetO1() {
+      return this.o1 != null;
+    }
+
+    public void setO1IsSet(boolean value) {
+      if (!value) {
+        this.o1 = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case O1:
+        if (value == null) {
+          unsetO1();
+        } else {
+          setO1((MetaException)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case O1:
+        return getO1();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case O1:
+        return isSetO1();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof add_replication_metrics_result)
+        return this.equals((add_replication_metrics_result)that);
+      return false;
+    }
+
+    public boolean equals(add_replication_metrics_result that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_o1 = true && this.isSetO1();
+      boolean that_present_o1 = true && that.isSetO1();
+      if (this_present_o1 || that_present_o1) {
+        if (!(this_present_o1 && that_present_o1))
+          return false;
+        if (!this.o1.equals(that.o1))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      List<Object> list = new ArrayList<Object>();
+
+      boolean present_o1 = true && (isSetO1());
+      list.add(present_o1);
+      if (present_o1)
+        list.add(o1);
+
+      return list.hashCode();
+    }
+
+    @Override
+    public int compareTo(add_replication_metrics_result other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+
+      lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetO1()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("add_replication_metrics_result(");
+      boolean first = true;
+
+      sb.append("o1:");
+      if (this.o1 == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.o1);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class add_replication_metrics_resultStandardSchemeFactory implements SchemeFactory {
+      public add_replication_metrics_resultStandardScheme getScheme() {
+        return new add_replication_metrics_resultStandardScheme();
+      }
+    }
+
+    private static class add_replication_metrics_resultStandardScheme extends StandardScheme<add_replication_metrics_result> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, add_replication_metrics_result struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+            break;
+          }
+          switch (schemeField.id) {
+            case 1: // O1
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.o1 = new MetaException();
+                struct.o1.read(iprot);
+                struct.setO1IsSet(true);
+              } else {
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, add_replication_metrics_result struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.o1 != null) {
+          oprot.writeFieldBegin(O1_FIELD_DESC);
+          struct.o1.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class add_replication_metrics_resultTupleSchemeFactory implements SchemeFactory {
+      public add_replication_metrics_resultTupleScheme getScheme() {
+        return new add_replication_metrics_resultTupleScheme();
+      }
+    }
+
+    private static class add_replication_metrics_resultTupleScheme extends TupleScheme<add_replication_metrics_result> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, add_replication_metrics_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetO1()) {
+          optionals.set(0);
+        }
+        oprot.writeBitSet(optionals, 1);
+        if (struct.isSetO1()) {
+          struct.o1.write(oprot);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, add_replication_metrics_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(1);
+        if (incoming.get(0)) {
+          struct.o1 = new MetaException();
+          struct.o1.read(iprot);
+          struct.setO1IsSet(true);
+        }
+      }
+    }
+
+  }
+}
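With the generated _args/_result pair in place, the Java side gains a blocking add_replication_metrics call that returns void and surfaces the o1 slot as a thrown MetaException. Below is a hedged usage sketch, assuming the standard generated ThriftHiveMetastore.Client bootstrap over a raw TSocket; the host/port and the contents of ReplicationMetricList (defined elsewhere in this patch, not in these hunks) are placeholders, not part of the change.

    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;

    // Hypothetical caller for the new RPC; assumes a metastore on localhost:9083.
    public class AddReplicationMetricsExample {
      public static void main(String[] args) throws Exception {
        TSocket transport = new TSocket("localhost", 9083);
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

        ReplicationMetricList metrics = new ReplicationMetricList(); // populate before sending
        client.add_replication_metrics(metrics); // void result; throws MetaException on failure
        transport.close();
      }
    }
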
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 803faf7413..39cbee14e6 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -1726,6 +1726,11 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
   * @throws \metastore\NoSuchObjectException
   */
  public function get_scheduled_query(\metastore\ScheduledQueryKey $scheduleKey);
+  /**
+   * @param \metastore\ReplicationMetricList $replicationMetricList
+   * @throws \metastore\MetaException
+   */
+  public function add_replication_metrics(\metastore\ReplicationMetricList $replicationMetricList);
 }
 
 class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf {
@@ -14905,6 +14910,57 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
     throw new \Exception("get_scheduled_query failed: unknown result");
   }
 
+  public function add_replication_metrics(\metastore\ReplicationMetricList $replicationMetricList)
+  {
+    $this->send_add_replication_metrics($replicationMetricList);
+    $this->recv_add_replication_metrics();
+  }
+
+  public function send_add_replication_metrics(\metastore\ReplicationMetricList $replicationMetricList)
+  {
+    $args = new \metastore\ThriftHiveMetastore_add_replication_metrics_args();
+    $args->replicationMetricList = $replicationMetricList;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'add_replication_metrics', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('add_replication_metrics', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_add_replication_metrics()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_add_replication_metrics_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_add_replication_metrics_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    return;
+  }
+
 }
 
 // HELPER FUNCTIONS AND STRUCTURES
@@ -17252,14 +17308,14 @@ class ThriftHiveMetastore_get_databases_result {
         case 0:
           if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1064 = 0;
-            $_etype1067 = 0;
-            $xfer += $input->readListBegin($_etype1067, $_size1064);
-            for ($_i1068 = 0; $_i1068 < $_size1064; ++$_i1068)
+            $_size1071 = 0;
+            $_etype1074 = 0;
+            $xfer += $input->readListBegin($_etype1074, $_size1071);
+            for ($_i1075 = 0; $_i1075 < $_size1071; ++$_i1075)
             {
-              $elem1069 = null;
-              $xfer += $input->readString($elem1069);
-              $this->success []= $elem1069;
+              $elem1076 = null;
+              $xfer += $input->readString($elem1076);
+              $this->success []= $elem1076;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17295,9 +17351,9 @@ class ThriftHiveMetastore_get_databases_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter1070)
+          foreach ($this->success as $iter1077)
           {
-            $xfer += $output->writeString($iter1070);
+            $xfer += $output->writeString($iter1077);
          }
        }
        $output->writeListEnd();
@@ -17428,14 +17484,14 @@ class ThriftHiveMetastore_get_all_databases_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1071 = 0;
-            $_etype1074 = 0;
-            $xfer += $input->readListBegin($_etype1074, $_size1071);
-            for ($_i1075 = 0; $_i1075 < $_size1071; ++$_i1075)
+            $_size1078 = 0;
+            $_etype1081 = 0;
+            $xfer += $input->readListBegin($_etype1081, $_size1078);
+            for ($_i1082 = 0; $_i1082 < $_size1078; ++$_i1082)
             {
-              $elem1076 = null;
-              $xfer += $input->readString($elem1076);
-              $this->success []= $elem1076;
+              $elem1083 = null;
+              $xfer += $input->readString($elem1083);
+              $this->success []= $elem1083;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17471,9 +17527,9 @@ class ThriftHiveMetastore_get_all_databases_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter1077)
+          foreach ($this->success as $iter1084)
           {
-            $xfer += $output->writeString($iter1077);
+            $xfer += $output->writeString($iter1084);
          }
        }
        $output->writeListEnd();
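Every renumbered hunk from here on is the same counted-list wire pattern: read a list header carrying the element type and count, loop count times reading one element per iteration, then close the list. The $_size/$_etype temporaries are that header, and $_i is the loop counter. A minimal Java equivalent using libthrift directly, with an in-memory transport and arbitrary sample strings (all names here are illustrative, not part of the patch):

    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.protocol.TList;
    import org.apache.thrift.protocol.TType;
    import org.apache.thrift.transport.TMemoryBuffer;

    // Sketch of the counted-list wire pattern behind these hunks.
    public class ListWireDemo {
      public static void main(String[] args) throws Exception {
        TMemoryBuffer buffer = new TMemoryBuffer(256);
        TBinaryProtocol proto = new TBinaryProtocol(buffer);

        // Writer: announce element type and count, then the elements.
        proto.writeListBegin(new TList(TType.STRING, 2));
        proto.writeString("default");
        proto.writeString("sales");
        proto.writeListEnd();

        // Reader: the list header read back here corresponds to the
        // $_size/$_etype pair in the generated PHP; the loop is $_i.
        TList header = proto.readListBegin();
        for (int i = 0; i < header.size; i++) {
          System.out.println(proto.readString());
        }
        proto.readListEnd();
      }
    }
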
@@ -18474,18 +18530,18 @@ class ThriftHiveMetastore_get_type_all_result {
        case 0:
          if ($ftype == TType::MAP) {
            $this->success = array();
-            $_size1078 = 0;
-            $_ktype1079 = 0;
-            $_vtype1080 = 0;
-            $xfer += $input->readMapBegin($_ktype1079, $_vtype1080, $_size1078);
-            for ($_i1082 = 0; $_i1082 < $_size1078; ++$_i1082)
+            $_size1085 = 0;
+            $_ktype1086 = 0;
+            $_vtype1087 = 0;
+            $xfer += $input->readMapBegin($_ktype1086, $_vtype1087, $_size1085);
+            for ($_i1089 = 0; $_i1089 < $_size1085; ++$_i1089)
             {
-              $key1083 = '';
-              $val1084 = new \metastore\Type();
-              $xfer += $input->readString($key1083);
-              $val1084 = new \metastore\Type();
-              $xfer += $val1084->read($input);
-              $this->success[$key1083] = $val1084;
+              $key1090 = '';
+              $val1091 = new \metastore\Type();
+              $xfer += $input->readString($key1090);
+              $val1091 = new \metastore\Type();
+              $xfer += $val1091->read($input);
+              $this->success[$key1090] = $val1091;
            }
            $xfer += $input->readMapEnd();
          } else {
@@ -18521,10 +18577,10 @@ class ThriftHiveMetastore_get_type_all_result {
      {
        $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $kiter1085 => $viter1086)
          {
-            $xfer += $output->writeString($kiter1085);
-            $xfer += $viter1086->write($output);
+          foreach ($this->success as $kiter1092 => $viter1093)
          {
+            $xfer += $output->writeString($kiter1092);
+            $xfer += $viter1093->write($output);
          }
        }
        $output->writeMapEnd();
@@ -18728,15 +18784,15 @@ class ThriftHiveMetastore_get_fields_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1087 = 0;
-            $_etype1090 = 0;
-            $xfer += $input->readListBegin($_etype1090, $_size1087);
-            for ($_i1091 = 0; $_i1091 < $_size1087; ++$_i1091)
+            $_size1094 = 0;
+            $_etype1097 = 0;
+            $xfer += $input->readListBegin($_etype1097, $_size1094);
+            for ($_i1098 = 0; $_i1098 < $_size1094; ++$_i1098)
            {
-              $elem1092 = null;
-              $elem1092 = new \metastore\FieldSchema();
-              $xfer += $elem1092->read($input);
-              $this->success []= $elem1092;
+              $elem1099 = null;
+              $elem1099 = new \metastore\FieldSchema();
+              $xfer += $elem1099->read($input);
+              $this->success []= $elem1099;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -18788,9 +18844,9 @@ class ThriftHiveMetastore_get_fields_result {
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1093)
+          foreach ($this->success as $iter1100)
          {
-            $xfer += $iter1093->write($output);
+            $xfer += $iter1100->write($output);
          }
        }
        $output->writeListEnd();
@@ -19032,15 +19088,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1094 = 0;
-            $_etype1097 = 0;
-            $xfer += $input->readListBegin($_etype1097, $_size1094);
-            for ($_i1098 = 0; $_i1098 < $_size1094; ++$_i1098)
+            $_size1101 = 0;
+            $_etype1104 = 0;
+            $xfer += $input->readListBegin($_etype1104, $_size1101);
+            for ($_i1105 = 0; $_i1105 < $_size1101; ++$_i1105)
            {
-              $elem1099 = null;
-              $elem1099 = new \metastore\FieldSchema();
-              $xfer += $elem1099->read($input);
-              $this->success []= $elem1099;
+              $elem1106 = null;
+              $elem1106 = new \metastore\FieldSchema();
+              $xfer += $elem1106->read($input);
+              $this->success []= $elem1106;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -19092,9 +19148,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result {
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1100)
+          foreach ($this->success as $iter1107)
          {
-            $xfer += $iter1100->write($output);
+            $xfer += $iter1107->write($output);
          }
        }
        $output->writeListEnd();
@@ -19308,15 +19364,15 @@ class ThriftHiveMetastore_get_schema_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1101 = 0;
-            $_etype1104 = 0;
-            $xfer += $input->readListBegin($_etype1104, $_size1101);
-            for ($_i1105 = 0; $_i1105 < $_size1101; ++$_i1105)
+            $_size1108 = 0;
+            $_etype1111 = 0;
+            $xfer += $input->readListBegin($_etype1111, $_size1108);
+            for ($_i1112 = 0; $_i1112 < $_size1108; ++$_i1112)
            {
-              $elem1106 = null;
-              $elem1106 = new \metastore\FieldSchema();
-              $xfer += $elem1106->read($input);
-              $this->success []= $elem1106;
+              $elem1113 = null;
+              $elem1113 = new \metastore\FieldSchema();
+              $xfer += $elem1113->read($input);
+              $this->success []= $elem1113;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -19368,9 +19424,9 @@ class ThriftHiveMetastore_get_schema_result {
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1107)
+          foreach ($this->success as $iter1114)
          {
-            $xfer += $iter1107->write($output);
+            $xfer += $iter1114->write($output);
          }
        }
        $output->writeListEnd();
@@ -19612,15 +19668,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1108 = 0;
-            $_etype1111 = 0;
-            $xfer += $input->readListBegin($_etype1111, $_size1108);
-            for ($_i1112 = 0; $_i1112 < $_size1108; ++$_i1112)
+            $_size1115 = 0;
+            $_etype1118 = 0;
+            $xfer += $input->readListBegin($_etype1118, $_size1115);
+            for ($_i1119 = 0; $_i1119 < $_size1115; ++$_i1119)
            {
-              $elem1113 = null;
-              $elem1113 = new \metastore\FieldSchema();
-              $xfer += $elem1113->read($input);
-              $this->success []= $elem1113;
+              $elem1120 = null;
+              $elem1120 = new \metastore\FieldSchema();
+              $xfer += $elem1120->read($input);
+              $this->success []= $elem1120;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -19672,9 +19728,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result {
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1114)
+          foreach ($this->success as $iter1121)
          {
-            $xfer += $iter1114->write($output);
+            $xfer += $iter1121->write($output);
          }
        }
        $output->writeListEnd();
@@ -20346,15 +20402,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
        case 2:
          if ($ftype == TType::LST) {
            $this->primaryKeys = array();
-            $_size1115 = 0;
-            $_etype1118 = 0;
-            $xfer += $input->readListBegin($_etype1118, $_size1115);
-            for ($_i1119 = 0; $_i1119 < $_size1115; ++$_i1119)
+            $_size1122 = 0;
+            $_etype1125 = 0;
+            $xfer += $input->readListBegin($_etype1125, $_size1122);
+            for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126)
            {
-              $elem1120 = null;
-              $elem1120 = new \metastore\SQLPrimaryKey();
-              $xfer += $elem1120->read($input);
-              $this->primaryKeys []= $elem1120;
+              $elem1127 = null;
+              $elem1127 = new \metastore\SQLPrimaryKey();
+              $xfer += $elem1127->read($input);
+              $this->primaryKeys []= $elem1127;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -20364,15 +20420,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
        case 3:
          if ($ftype == TType::LST) {
            $this->foreignKeys = array();
-            $_size1121 = 0;
-            $_etype1124 = 0;
-            $xfer += $input->readListBegin($_etype1124, $_size1121);
-            for ($_i1125 = 0; $_i1125 < $_size1121; ++$_i1125)
+            $_size1128 = 0;
+            $_etype1131 = 0;
+            $xfer += $input->readListBegin($_etype1131, $_size1128);
+            for ($_i1132 = 0; $_i1132 < $_size1128; ++$_i1132)
            {
-              $elem1126 = null;
-              $elem1126 = new \metastore\SQLForeignKey();
-              $xfer += $elem1126->read($input);
-              $this->foreignKeys []= $elem1126;
+              $elem1133 = null;
+              $elem1133 = new \metastore\SQLForeignKey();
+              $xfer += $elem1133->read($input);
+              $this->foreignKeys []= $elem1133;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -20382,15 +20438,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
        case 4:
          if ($ftype == TType::LST) {
            $this->uniqueConstraints = array();
-            $_size1127 = 0;
-            $_etype1130 = 0;
-            $xfer += $input->readListBegin($_etype1130, $_size1127);
-            for ($_i1131 = 0; $_i1131 < $_size1127; ++$_i1131)
+            $_size1134 = 0;
+            $_etype1137 = 0;
+            $xfer += $input->readListBegin($_etype1137, $_size1134);
+            for ($_i1138 = 0; $_i1138 < $_size1134; ++$_i1138)
            {
-              $elem1132 = null;
-              $elem1132 = new \metastore\SQLUniqueConstraint();
-              $xfer += $elem1132->read($input);
-              $this->uniqueConstraints []= $elem1132;
+              $elem1139 = null;
+              $elem1139 = new \metastore\SQLUniqueConstraint();
+              $xfer += $elem1139->read($input);
+              $this->uniqueConstraints []= $elem1139;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -20400,15 +20456,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
        case 5:
          if ($ftype == TType::LST) {
            $this->notNullConstraints = array();
-            $_size1133 = 0;
-            $_etype1136 = 0;
-            $xfer += $input->readListBegin($_etype1136, $_size1133);
-            for ($_i1137 = 0; $_i1137 < $_size1133; ++$_i1137)
+            $_size1140 = 0;
+            $_etype1143 = 0;
+            $xfer += $input->readListBegin($_etype1143, $_size1140);
+            for ($_i1144 = 0; $_i1144 < $_size1140; ++$_i1144)
            {
-              $elem1138 = null;
-              $elem1138 = new \metastore\SQLNotNullConstraint();
-              $xfer += $elem1138->read($input);
-              $this->notNullConstraints []= $elem1138;
+              $elem1145 = null;
+              $elem1145 = new \metastore\SQLNotNullConstraint();
+              $xfer += $elem1145->read($input);
+              $this->notNullConstraints []= $elem1145;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -20418,15 +20474,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
        case 6:
          if ($ftype == TType::LST) {
            $this->defaultConstraints = array();
-            $_size1139 = 0;
-            $_etype1142 = 0;
-            $xfer += $input->readListBegin($_etype1142, $_size1139);
-            for ($_i1143 = 0; $_i1143 < $_size1139; ++$_i1143)
+            $_size1146 = 0;
+            $_etype1149 = 0;
+            $xfer += $input->readListBegin($_etype1149, $_size1146);
+            for ($_i1150 = 0; $_i1150 < $_size1146; ++$_i1150)
            {
-              $elem1144 = null;
-              $elem1144 = new \metastore\SQLDefaultConstraint();
-              $xfer += $elem1144->read($input);
-              $this->defaultConstraints []= $elem1144;
+              $elem1151 = null;
+              $elem1151 = new \metastore\SQLDefaultConstraint();
+              $xfer += $elem1151->read($input);
+              $this->defaultConstraints []= $elem1151;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -20436,15 +20492,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
        case 7:
          if ($ftype == TType::LST) {
            $this->checkConstraints = array();
-            $_size1145 = 0;
-            $_etype1148 = 0;
-            $xfer += $input->readListBegin($_etype1148, $_size1145);
-            for ($_i1149 = 0; $_i1149 < $_size1145; ++$_i1149)
+            $_size1152 = 0;
+            $_etype1155 = 0;
+            $xfer += $input->readListBegin($_etype1155, $_size1152);
+            for ($_i1156 = 0; $_i1156 < $_size1152; ++$_i1156)
            {
-              $elem1150 = null;
-              $elem1150 = new \metastore\SQLCheckConstraint();
-              $xfer += $elem1150->read($input);
-              $this->checkConstraints []= $elem1150;
+              $elem1157 = null;
+              $elem1157 = new \metastore\SQLCheckConstraint();
+              $xfer += $elem1157->read($input);
+              $this->checkConstraints []= $elem1157;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -20480,9 +20536,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
      {
        $output->writeListBegin(TType::STRUCT, count($this->primaryKeys));
        {
-          foreach ($this->primaryKeys as $iter1151)
+          foreach ($this->primaryKeys as $iter1158)
          {
-            $xfer += $iter1151->write($output);
+            $xfer += $iter1158->write($output);
          }
        }
        $output->writeListEnd();
@@ -20497,9 +20553,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
      {
        $output->writeListBegin(TType::STRUCT, count($this->foreignKeys));
        {
-          foreach ($this->foreignKeys as $iter1152)
+          foreach ($this->foreignKeys as $iter1159)
          {
-            $xfer += $iter1152->write($output);
+            $xfer += $iter1159->write($output);
          }
        }
        $output->writeListEnd();
@@ -20514,9 +20570,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
      {
        $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints));
        {
-          foreach ($this->uniqueConstraints as $iter1153)
+          foreach ($this->uniqueConstraints as $iter1160)
          {
-            $xfer += $iter1153->write($output);
+            $xfer += $iter1160->write($output);
          }
        }
        $output->writeListEnd();
@@ -20531,9 +20587,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
      {
        $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints));
        {
-          foreach ($this->notNullConstraints as $iter1154)
+          foreach ($this->notNullConstraints as $iter1161)
          {
-            $xfer += $iter1154->write($output);
+            $xfer += $iter1161->write($output);
          }
        }
        $output->writeListEnd();
@@ -20548,9 +20604,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
      {
        $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints));
        {
-          foreach ($this->defaultConstraints as $iter1155)
+          foreach ($this->defaultConstraints as $iter1162)
          {
-            $xfer += $iter1155->write($output);
+            $xfer += $iter1162->write($output);
          }
        }
        $output->writeListEnd();
@@ -20565,9 +20621,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
      {
        $output->writeListBegin(TType::STRUCT, count($this->checkConstraints));
        {
-          foreach ($this->checkConstraints as $iter1156)
+          foreach ($this->checkConstraints as $iter1163)
          {
-            $xfer += $iter1156->write($output);
+            $xfer += $iter1163->write($output);
          }
        }
        $output->writeListEnd();
@@ -22799,14 +22855,14 @@ class ThriftHiveMetastore_truncate_table_args {
        case 3:
          if ($ftype == TType::LST) {
            $this->partNames = array();
-            $_size1157 = 0;
-            $_etype1160 = 0;
-            $xfer += $input->readListBegin($_etype1160, $_size1157);
-            for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161)
+            $_size1164 = 0;
+            $_etype1167 = 0;
+            $xfer += $input->readListBegin($_etype1167, $_size1164);
+            for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168)
            {
-              $elem1162 = null;
-              $xfer += $input->readString($elem1162);
-              $this->partNames []= $elem1162;
+              $elem1169 = null;
+              $xfer += $input->readString($elem1169);
+              $this->partNames []= $elem1169;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -22844,9 +22900,9 @@ class ThriftHiveMetastore_truncate_table_args {
      {
        $output->writeListBegin(TType::STRING, count($this->partNames));
        {
-          foreach ($this->partNames as $iter1163)
+          foreach ($this->partNames as $iter1170)
          {
-            $xfer += $output->writeString($iter1163);
+            $xfer += $output->writeString($iter1170);
          }
        }
        $output->writeListEnd();
@@ -23282,14 +23338,14 @@ class ThriftHiveMetastore_get_tables_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1164 = 0;
-            $_etype1167 = 0;
-            $xfer += $input->readListBegin($_etype1167, $_size1164);
-            for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168)
+            $_size1171 = 0;
+            $_etype1174 = 0;
+            $xfer += $input->readListBegin($_etype1174, $_size1171);
+            for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175)
            {
-              $elem1169 = null;
-              $xfer += $input->readString($elem1169);
-              $this->success []= $elem1169;
+              $elem1176 = null;
+              $xfer += $input->readString($elem1176);
+              $this->success []= $elem1176;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -23325,9 +23381,9 @@ class ThriftHiveMetastore_get_tables_result {
      {
        $output->writeListBegin(TType::STRING, count($this->success));
        {
-          foreach ($this->success as $iter1170)
+          foreach ($this->success as $iter1177)
          {
-            $xfer += $output->writeString($iter1170);
+            $xfer += $output->writeString($iter1177);
          }
        }
        $output->writeListEnd();
@@ -23529,14 +23585,14 @@ class ThriftHiveMetastore_get_tables_by_type_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1171 = 0;
-            $_etype1174 = 0;
-            $xfer += $input->readListBegin($_etype1174, $_size1171);
-            for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175)
+            $_size1178 = 0;
+            $_etype1181 = 0;
+            $xfer += $input->readListBegin($_etype1181, $_size1178);
+            for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182)
            {
-              $elem1176 = null;
-              $xfer += $input->readString($elem1176);
-              $this->success []= $elem1176;
+              $elem1183 = null;
+              $xfer += $input->readString($elem1183);
+              $this->success []= $elem1183;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -23572,9 +23628,9 @@ class ThriftHiveMetastore_get_tables_by_type_result {
      {
        $output->writeListBegin(TType::STRING, count($this->success));
        {
-          foreach ($this->success as $iter1177)
+          foreach ($this->success as $iter1184)
          {
-            $xfer += $output->writeString($iter1177);
+            $xfer += $output->writeString($iter1184);
          }
        }
        $output->writeListEnd();
@@ -23706,15 +23762,15 @@ class ThriftHiveMetastore_get_all_materialized_view_objects_for_rewriting_result
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1178 = 0;
-            $_etype1181 = 0;
-            $xfer += $input->readListBegin($_etype1181, $_size1178);
-            for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182)
+            $_size1185 = 0;
+            $_etype1188 = 0;
+            $xfer += $input->readListBegin($_etype1188, $_size1185);
+            for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189)
            {
-              $elem1183 = null;
-              $elem1183 = new \metastore\Table();
-              $xfer += $elem1183->read($input);
-              $this->success []= $elem1183;
+              $elem1190 = null;
+              $elem1190 = new \metastore\Table();
+              $xfer += $elem1190->read($input);
+              $this->success []= $elem1190;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -23750,9 +23806,9 @@ class ThriftHiveMetastore_get_all_materialized_view_objects_for_rewriting_result
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1184)
+          foreach ($this->success as $iter1191)
          {
-            $xfer += $iter1184->write($output);
+            $xfer += $iter1191->write($output);
          }
        }
        $output->writeListEnd();
@@ -23908,14 +23964,14 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1185 = 0;
-            $_etype1188 = 0;
-            $xfer += $input->readListBegin($_etype1188, $_size1185);
-            for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189)
+            $_size1192 = 0;
+            $_etype1195 = 0;
+            $xfer += $input->readListBegin($_etype1195, $_size1192);
+            for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196)
            {
-              $elem1190 = null;
-              $xfer += $input->readString($elem1190);
-              $this->success []= $elem1190;
+              $elem1197 = null;
+              $xfer += $input->readString($elem1197);
+              $this->success []= $elem1197;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -23951,9 +24007,9 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result {
      {
        $output->writeListBegin(TType::STRING, count($this->success));
        {
-          foreach ($this->success as $iter1191)
+          foreach ($this->success as $iter1198)
          {
-            $xfer += $output->writeString($iter1191);
+            $xfer += $output->writeString($iter1198);
          }
        }
        $output->writeListEnd();
@@ -24058,14 +24114,14 @@ class ThriftHiveMetastore_get_table_meta_args {
        case 3:
          if ($ftype == TType::LST) {
            $this->tbl_types = array();
-            $_size1192 = 0;
-            $_etype1195 = 0;
-            $xfer += $input->readListBegin($_etype1195, $_size1192);
-            for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196)
+            $_size1199 = 0;
+            $_etype1202 = 0;
+            $xfer += $input->readListBegin($_etype1202, $_size1199);
+            for ($_i1203 = 0; $_i1203 < $_size1199; ++$_i1203)
            {
-              $elem1197 = null;
-              $xfer += $input->readString($elem1197);
-              $this->tbl_types []= $elem1197;
+              $elem1204 = null;
+              $xfer += $input->readString($elem1204);
+              $this->tbl_types []= $elem1204;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -24103,9 +24159,9 @@ class ThriftHiveMetastore_get_table_meta_args {
      {
        $output->writeListBegin(TType::STRING, count($this->tbl_types));
        {
-          foreach ($this->tbl_types as $iter1198)
+          foreach ($this->tbl_types as $iter1205)
          {
-            $xfer += $output->writeString($iter1198);
+            $xfer += $output->writeString($iter1205);
          }
        }
        $output->writeListEnd();
@@ -24182,15 +24238,15 @@ class ThriftHiveMetastore_get_table_meta_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1199 = 0;
-            $_etype1202 = 0;
-            $xfer += $input->readListBegin($_etype1202, $_size1199);
-            for ($_i1203 = 0; $_i1203 < $_size1199; ++$_i1203)
+            $_size1206 = 0;
+            $_etype1209 = 0;
+            $xfer += $input->readListBegin($_etype1209, $_size1206);
+            for ($_i1210 = 0; $_i1210 < $_size1206; ++$_i1210)
            {
-              $elem1204 = null;
-              $elem1204 = new \metastore\TableMeta();
-              $xfer += $elem1204->read($input);
-              $this->success []= $elem1204;
+              $elem1211 = null;
+              $elem1211 = new \metastore\TableMeta();
+              $xfer += $elem1211->read($input);
+              $this->success []= $elem1211;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -24226,9 +24282,9 @@ class ThriftHiveMetastore_get_table_meta_result {
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1205)
+          foreach ($this->success as $iter1212)
          {
-            $xfer += $iter1205->write($output);
+            $xfer += $iter1212->write($output);
          }
        }
        $output->writeListEnd();
@@ -24384,14 +24440,14 @@ class ThriftHiveMetastore_get_all_tables_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1206 = 0;
-            $_etype1209 = 0;
-            $xfer += $input->readListBegin($_etype1209, $_size1206);
-            for ($_i1210 = 0; $_i1210 < $_size1206; ++$_i1210)
+            $_size1213 = 0;
+            $_etype1216 = 0;
+            $xfer += $input->readListBegin($_etype1216, $_size1213);
+            for ($_i1217 = 0; $_i1217 < $_size1213; ++$_i1217)
            {
-              $elem1211 = null;
-              $xfer += $input->readString($elem1211);
-              $this->success []= $elem1211;
+              $elem1218 = null;
+              $xfer += $input->readString($elem1218);
+              $this->success []= $elem1218;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -24427,9 +24483,9 @@ class ThriftHiveMetastore_get_all_tables_result {
      {
        $output->writeListBegin(TType::STRING, count($this->success));
        {
-          foreach ($this->success as $iter1212)
+          foreach ($this->success as $iter1219)
          {
-            $xfer += $output->writeString($iter1212);
+            $xfer += $output->writeString($iter1219);
          }
        }
        $output->writeListEnd();
@@ -24744,14 +24800,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args {
        case 2:
          if ($ftype == TType::LST) {
            $this->tbl_names = array();
-            $_size1213 = 0;
-            $_etype1216 = 0;
-            $xfer += $input->readListBegin($_etype1216, $_size1213);
-            for ($_i1217 = 0; $_i1217 < $_size1213; ++$_i1217)
+            $_size1220 = 0;
+            $_etype1223 = 0;
+            $xfer += $input->readListBegin($_etype1223, $_size1220);
+            for ($_i1224 = 0; $_i1224 < $_size1220; ++$_i1224)
            {
-              $elem1218 = null;
-              $xfer += $input->readString($elem1218);
-              $this->tbl_names []= $elem1218;
+              $elem1225 = null;
+              $xfer += $input->readString($elem1225);
+              $this->tbl_names []= $elem1225;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -24784,9 +24840,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args {
      {
        $output->writeListBegin(TType::STRING, count($this->tbl_names));
        {
-          foreach ($this->tbl_names as $iter1219)
+          foreach ($this->tbl_names as $iter1226)
          {
-            $xfer += $output->writeString($iter1219);
+            $xfer += $output->writeString($iter1226);
          }
        }
        $output->writeListEnd();
@@ -24851,15 +24907,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1220 = 0;
-            $_etype1223 = 0;
-            $xfer += $input->readListBegin($_etype1223, $_size1220);
-            for ($_i1224 = 0; $_i1224 < $_size1220; ++$_i1224)
+            $_size1227 = 0;
+            $_etype1230 = 0;
+            $xfer += $input->readListBegin($_etype1230, $_size1227);
+            for ($_i1231 = 0; $_i1231 < $_size1227; ++$_i1231)
            {
-              $elem1225 = null;
-              $elem1225 = new \metastore\Table();
-              $xfer += $elem1225->read($input);
-              $this->success []= $elem1225;
+              $elem1232 = null;
+              $elem1232 = new \metastore\Table();
+              $xfer += $elem1232->read($input);
+              $this->success []= $elem1232;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -24887,9 +24943,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result {
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1226)
+          foreach ($this->success as $iter1233)
          {
-            $xfer += $iter1226->write($output);
+            $xfer += $iter1233->write($output);
          }
        }
        $output->writeListEnd();
@@ -25046,15 +25102,15 @@ class ThriftHiveMetastore_get_tables_ext_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1227 = 0;
-            $_etype1230 = 0;
-            $xfer += $input->readListBegin($_etype1230, $_size1227);
-            for ($_i1231 = 0; $_i1231 < $_size1227; ++$_i1231)
+            $_size1234 = 0;
+            $_etype1237 = 0;
+            $xfer += $input->readListBegin($_etype1237, $_size1234);
+            for ($_i1238 = 0; $_i1238 < $_size1234; ++$_i1238)
            {
-              $elem1232 = null;
-              $elem1232 = new \metastore\ExtendedTableInfo();
-              $xfer += $elem1232->read($input);
-              $this->success []= $elem1232;
+              $elem1239 = null;
+              $elem1239 = new \metastore\ExtendedTableInfo();
+              $xfer += $elem1239->read($input);
+              $this->success []= $elem1239;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -25090,9 +25146,9 @@ class ThriftHiveMetastore_get_tables_ext_result {
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1233)
+          foreach ($this->success as $iter1240)
          {
-            $xfer += $iter1233->write($output);
+            $xfer += $iter1240->write($output);
          }
        }
        $output->writeListEnd();
@@ -26297,14 +26353,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1234 = 0;
-            $_etype1237 = 0;
-            $xfer += $input->readListBegin($_etype1237, $_size1234);
-            for ($_i1238 = 0; $_i1238 < $_size1234; ++$_i1238)
+            $_size1241 = 0;
+            $_etype1244 = 0;
+            $xfer += $input->readListBegin($_etype1244, $_size1241);
+            for ($_i1245 = 0; $_i1245 < $_size1241; ++$_i1245)
            {
-              $elem1239 = null;
-              $xfer += $input->readString($elem1239);
-              $this->success []= $elem1239;
+              $elem1246 = null;
+              $xfer += $input->readString($elem1246);
+              $this->success []= $elem1246;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -26356,9 +26412,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result {
      {
        $output->writeListBegin(TType::STRING, count($this->success));
        {
-          foreach ($this->success as $iter1240)
+          foreach ($this->success as $iter1247)
          {
-            $xfer += $output->writeString($iter1240);
+            $xfer += $output->writeString($iter1247);
          }
        }
        $output->writeListEnd();
@@ -27881,15 +27937,15 @@ class ThriftHiveMetastore_add_partitions_args {
        case 1:
          if ($ftype == TType::LST) {
            $this->new_parts = array();
-            $_size1241 = 0;
-            $_etype1244 = 0;
-            $xfer += $input->readListBegin($_etype1244, $_size1241);
-            for ($_i1245 = 0; $_i1245 < $_size1241; ++$_i1245)
+            $_size1248 = 0;
+            $_etype1251 = 0;
+            $xfer += $input->readListBegin($_etype1251, $_size1248);
+            for ($_i1252 = 0; $_i1252 < $_size1248; ++$_i1252)
            {
-              $elem1246 = null;
-              $elem1246 = new \metastore\Partition();
-              $xfer += $elem1246->read($input);
-              $this->new_parts []= $elem1246;
+              $elem1253 = null;
+              $elem1253 = new \metastore\Partition();
+              $xfer += $elem1253->read($input);
+              $this->new_parts []= $elem1253;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -27917,9 +27973,9 @@ class ThriftHiveMetastore_add_partitions_args {
      {
        $output->writeListBegin(TType::STRUCT, count($this->new_parts));
        {
-          foreach ($this->new_parts as $iter1247)
+          foreach ($this->new_parts as $iter1254)
          {
-            $xfer += $iter1247->write($output);
+            $xfer += $iter1254->write($output);
          }
        }
        $output->writeListEnd();
@@ -28134,15 +28190,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args {
        case 1:
          if ($ftype == TType::LST) {
            $this->new_parts = array();
-            $_size1248 = 0;
-            $_etype1251 = 0;
-            $xfer += $input->readListBegin($_etype1251, $_size1248);
-            for ($_i1252 = 0; $_i1252 < $_size1248; ++$_i1252)
+            $_size1255 = 0;
+            $_etype1258 = 0;
+            $xfer += $input->readListBegin($_etype1258, $_size1255);
+            for ($_i1259 = 0; $_i1259 < $_size1255; ++$_i1259)
            {
-              $elem1253 = null;
-              $elem1253 = new \metastore\PartitionSpec();
-              $xfer += $elem1253->read($input);
-              $this->new_parts []= $elem1253;
+              $elem1260 = null;
+              $elem1260 = new \metastore\PartitionSpec();
+              $xfer += $elem1260->read($input);
+              $this->new_parts []= $elem1260;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -28170,9 +28226,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args {
      {
        $output->writeListBegin(TType::STRUCT, count($this->new_parts));
        {
-          foreach ($this->new_parts as $iter1254)
+          foreach ($this->new_parts as $iter1261)
          {
-            $xfer += $iter1254->write($output);
+            $xfer += $iter1261->write($output);
          }
        }
        $output->writeListEnd();
@@ -28422,14 +28478,14 @@ class ThriftHiveMetastore_append_partition_args {
        case 3:
          if ($ftype == TType::LST) {
            $this->part_vals = array();
-            $_size1255 = 0;
-            $_etype1258 = 0;
-            $xfer += $input->readListBegin($_etype1258, $_size1255);
-            for ($_i1259 = 0; $_i1259 < $_size1255; ++$_i1259)
+            $_size1262 = 0;
+            $_etype1265 = 0;
+            $xfer += $input->readListBegin($_etype1265, $_size1262);
+            for ($_i1266 = 0; $_i1266 < $_size1262; ++$_i1266)
            {
-              $elem1260 = null;
-              $xfer += $input->readString($elem1260);
-              $this->part_vals []= $elem1260;
+              $elem1267 = null;
+              $xfer += $input->readString($elem1267);
+              $this->part_vals []= $elem1267;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -28467,9 +28523,9 @@ class ThriftHiveMetastore_append_partition_args {
      {
        $output->writeListBegin(TType::STRING, count($this->part_vals));
        {
-          foreach ($this->part_vals as $iter1261)
+          foreach ($this->part_vals as $iter1268)
          {
-            $xfer += $output->writeString($iter1261);
+            $xfer += $output->writeString($iter1268);
          }
        }
        $output->writeListEnd();
@@ -28971,14 +29027,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args {
        case 3:
          if ($ftype == TType::LST) {
            $this->part_vals = array();
-            $_size1262 = 0;
-            $_etype1265 = 0;
-            $xfer += $input->readListBegin($_etype1265, $_size1262);
-            for ($_i1266 = 0; $_i1266 < $_size1262; ++$_i1266)
+            $_size1269 = 0;
+            $_etype1272 = 0;
+            $xfer += $input->readListBegin($_etype1272, $_size1269);
+            for ($_i1273 = 0; $_i1273 < $_size1269; ++$_i1273)
            {
-              $elem1267 = null;
-              $xfer += $input->readString($elem1267);
-              $this->part_vals []= $elem1267;
+              $elem1274 = null;
+              $xfer += $input->readString($elem1274);
+              $this->part_vals []= $elem1274;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -29024,9 +29080,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args {
      {
        $output->writeListBegin(TType::STRING, count($this->part_vals));
        {
-          foreach ($this->part_vals as $iter1268)
+          foreach ($this->part_vals as $iter1275)
          {
-            $xfer += $output->writeString($iter1268);
+            $xfer += $output->writeString($iter1275);
          }
        }
        $output->writeListEnd();
@@ -29880,14 +29936,14 @@ class ThriftHiveMetastore_drop_partition_args {
        case 3:
          if ($ftype == TType::LST) {
            $this->part_vals = array();
-            $_size1269 = 0;
-            $_etype1272 = 0;
-            $xfer += $input->readListBegin($_etype1272, $_size1269);
-            for ($_i1273 = 0; $_i1273 < $_size1269; ++$_i1273)
+            $_size1276 = 0;
+            $_etype1279 = 0;
+            $xfer += $input->readListBegin($_etype1279, $_size1276);
+            for ($_i1280 = 0; $_i1280 < $_size1276; ++$_i1280)
            {
-              $elem1274 = null;
-              $xfer += $input->readString($elem1274);
-              $this->part_vals []= $elem1274;
+              $elem1281 = null;
+              $xfer += $input->readString($elem1281);
+              $this->part_vals []= $elem1281;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -29932,9 +29988,9 @@ class ThriftHiveMetastore_drop_partition_args {
      {
        $output->writeListBegin(TType::STRING, count($this->part_vals));
        {
-          foreach ($this->part_vals as $iter1275)
+          foreach ($this->part_vals as $iter1282)
          {
-            $xfer += $output->writeString($iter1275);
+            $xfer += $output->writeString($iter1282);
          }
        }
        $output->writeListEnd();
@@ -30187,14 +30243,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args {
        case 3:
          if ($ftype == TType::LST) {
            $this->part_vals = array();
-            $_size1276 = 0;
-            $_etype1279 = 0;
-            $xfer += $input->readListBegin($_etype1279, $_size1276);
-            for ($_i1280 = 0; $_i1280 < $_size1276; ++$_i1280)
+            $_size1283 = 0;
+            $_etype1286 = 0;
+            $xfer += $input->readListBegin($_etype1286, $_size1283);
+            for ($_i1287 = 0; $_i1287 < $_size1283; ++$_i1287)
            {
-              $elem1281 = null;
-              $xfer += $input->readString($elem1281);
-              $this->part_vals []= $elem1281;
+              $elem1288 = null;
+              $xfer += $input->readString($elem1288);
+              $this->part_vals []= $elem1288;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -30247,9 +30303,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args {
      {
        $output->writeListBegin(TType::STRING, count($this->part_vals));
        {
-          foreach ($this->part_vals as $iter1282)
+          foreach ($this->part_vals as $iter1289)
          {
-            $xfer += $output->writeString($iter1282);
+            $xfer += $output->writeString($iter1289);
          }
        }
        $output->writeListEnd();
@@ -31263,14 +31319,14 @@ class ThriftHiveMetastore_get_partition_args {
        case 3:
          if ($ftype == TType::LST) {
            $this->part_vals = array();
-            $_size1283 = 0;
-            $_etype1286 = 0;
-            $xfer += $input->readListBegin($_etype1286, $_size1283);
-            for ($_i1287 = 0; $_i1287 < $_size1283; ++$_i1287)
+            $_size1290 = 0;
+            $_etype1293 = 0;
+            $xfer += $input->readListBegin($_etype1293, $_size1290);
+            for ($_i1294 = 0; $_i1294 < $_size1290; ++$_i1294)
            {
-              $elem1288 = null;
-              $xfer += $input->readString($elem1288);
-              $this->part_vals []= $elem1288;
+              $elem1295 = null;
+              $xfer += $input->readString($elem1295);
+              $this->part_vals []= $elem1295;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -31308,9 +31364,9 @@ class ThriftHiveMetastore_get_partition_args {
      {
        $output->writeListBegin(TType::STRING, count($this->part_vals));
        {
-          foreach ($this->part_vals as $iter1289)
+          foreach ($this->part_vals as $iter1296)
          {
-            $xfer += $output->writeString($iter1289);
+            $xfer += $output->writeString($iter1296);
          }
        }
        $output->writeListEnd();
@@ -31552,17 +31608,17 @@ class ThriftHiveMetastore_exchange_partition_args {
        case 1:
          if ($ftype == TType::MAP) {
            $this->partitionSpecs = array();
-            $_size1290 = 0;
-            $_ktype1291 = 0;
-            $_vtype1292 = 0;
-            $xfer += $input->readMapBegin($_ktype1291, $_vtype1292, $_size1290);
-            for ($_i1294 = 0; $_i1294 < $_size1290; ++$_i1294)
+            $_size1297 = 0;
+            $_ktype1298 = 0;
+            $_vtype1299 = 0;
+            $xfer += $input->readMapBegin($_ktype1298, $_vtype1299, $_size1297);
+            for ($_i1301 = 0; $_i1301 < $_size1297; ++$_i1301)
            {
-              $key1295 = '';
-              $val1296 = '';
-              $xfer += $input->readString($key1295);
-              $xfer += $input->readString($val1296);
-              $this->partitionSpecs[$key1295] = $val1296;
+              $key1302 = '';
+              $val1303 = '';
+              $xfer += $input->readString($key1302);
+              $xfer += $input->readString($val1303);
+              $this->partitionSpecs[$key1302] = $val1303;
            }
            $xfer += $input->readMapEnd();
          } else {
@@ -31618,10 +31674,10 @@ class ThriftHiveMetastore_exchange_partition_args {
      {
        $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
        {
-          foreach ($this->partitionSpecs as $kiter1297 => $viter1298)
+          foreach ($this->partitionSpecs as $kiter1304 => $viter1305)
          {
-            $xfer += $output->writeString($kiter1297);
-            $xfer += $output->writeString($viter1298);
+            $xfer += $output->writeString($kiter1304);
+            $xfer += $output->writeString($viter1305);
          }
        }
        $output->writeMapEnd();
@@ -31933,17 +31989,17 @@ class ThriftHiveMetastore_exchange_partitions_args {
        case 1:
          if ($ftype == TType::MAP) {
            $this->partitionSpecs = array();
-            $_size1299 = 0;
-            $_ktype1300 = 0;
-            $_vtype1301 = 0;
-            $xfer += $input->readMapBegin($_ktype1300, $_vtype1301, $_size1299);
-            for ($_i1303 = 0; $_i1303 < $_size1299; ++$_i1303)
+            $_size1306 = 0;
+            $_ktype1307 = 0;
+            $_vtype1308 = 0;
+            $xfer += $input->readMapBegin($_ktype1307, $_vtype1308, $_size1306);
+            for ($_i1310 = 0; $_i1310 < $_size1306; ++$_i1310)
            {
-              $key1304 = '';
-              $val1305 = '';
-              $xfer += $input->readString($key1304);
-              $xfer += $input->readString($val1305);
-              $this->partitionSpecs[$key1304] = $val1305;
+              $key1311 = '';
+              $val1312 = '';
+              $xfer += $input->readString($key1311);
+              $xfer += $input->readString($val1312);
+              $this->partitionSpecs[$key1311] = $val1312;
            }
            $xfer += $input->readMapEnd();
          } else {
@@ -31999,10 +32055,10 @@ class ThriftHiveMetastore_exchange_partitions_args {
      {
        $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
        {
-          foreach ($this->partitionSpecs as $kiter1306 => $viter1307)
+          foreach ($this->partitionSpecs as $kiter1313 => $viter1314)
          {
-            $xfer += $output->writeString($kiter1306);
-            $xfer += $output->writeString($viter1307);
+            $xfer += $output->writeString($kiter1313);
+            $xfer += $output->writeString($viter1314);
          }
        }
        $output->writeMapEnd();
@@ -32135,15 +32191,15 @@ class ThriftHiveMetastore_exchange_partitions_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1308 = 0;
-            $_etype1311 = 0;
-            $xfer += $input->readListBegin($_etype1311, $_size1308);
-            for ($_i1312 = 0; $_i1312 < $_size1308; ++$_i1312)
+            $_size1315 = 0;
+            $_etype1318 = 0;
+            $xfer += $input->readListBegin($_etype1318, $_size1315);
+            for ($_i1319 = 0; $_i1319 < $_size1315; ++$_i1319)
            {
-              $elem1313 = null;
-              $elem1313 = new \metastore\Partition();
-              $xfer += $elem1313->read($input);
-              $this->success []= $elem1313;
+              $elem1320 = null;
+              $elem1320 = new \metastore\Partition();
+              $xfer += $elem1320->read($input);
+              $this->success []= $elem1320;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -32203,9 +32259,9 @@ class ThriftHiveMetastore_exchange_partitions_result {
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1314)
+          foreach ($this->success as $iter1321)
          {
-            $xfer += $iter1314->write($output);
+            $xfer += $iter1321->write($output);
          }
        }
        $output->writeListEnd();
@@ -32351,14 +32407,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
        case 3:
          if ($ftype == TType::LST) {
            $this->part_vals = array();
-            $_size1315 = 0;
-            $_etype1318 = 0;
-            $xfer += $input->readListBegin($_etype1318, $_size1315);
-            for ($_i1319 = 0; $_i1319 < $_size1315; ++$_i1319)
+            $_size1322 = 0;
+            $_etype1325 = 0;
+            $xfer += $input->readListBegin($_etype1325, $_size1322);
+            for ($_i1326 = 0; $_i1326 < $_size1322; ++$_i1326)
            {
-              $elem1320 = null;
-              $xfer += $input->readString($elem1320);
-              $this->part_vals []= $elem1320;
+              $elem1327 = null;
+              $xfer += $input->readString($elem1327);
+              $this->part_vals []= $elem1327;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -32375,14 +32431,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
        case 5:
          if ($ftype == TType::LST) {
            $this->group_names = array();
-            $_size1321 = 0;
-            $_etype1324 = 0;
-            $xfer += $input->readListBegin($_etype1324, $_size1321);
-            for ($_i1325 = 0; $_i1325 < $_size1321; ++$_i1325)
+            $_size1328 = 0;
+            $_etype1331 = 0;
+            $xfer += $input->readListBegin($_etype1331, $_size1328);
+            for ($_i1332 = 0; $_i1332 < $_size1328; ++$_i1332)
            {
-              $elem1326 = null;
-              $xfer += $input->readString($elem1326);
-              $this->group_names []= $elem1326;
+              $elem1333 = null;
+              $xfer += $input->readString($elem1333);
+              $this->group_names []= $elem1333;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -32420,9 +32476,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
      {
        $output->writeListBegin(TType::STRING, count($this->part_vals));
        {
-          foreach ($this->part_vals as $iter1327)
+          foreach ($this->part_vals as $iter1334)
          {
-            $xfer += $output->writeString($iter1327);
+            $xfer += $output->writeString($iter1334);
          }
        }
        $output->writeListEnd();
@@ -32442,9 +32498,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
      {
        $output->writeListBegin(TType::STRING, count($this->group_names));
        {
-          foreach ($this->group_names as $iter1328)
+          foreach ($this->group_names as $iter1335)
          {
-            $xfer += $output->writeString($iter1328);
+            $xfer += $output->writeString($iter1335);
          }
        }
        $output->writeListEnd();
@@ -33035,15 +33091,15 @@ class ThriftHiveMetastore_get_partitions_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1329 = 0;
-            $_etype1332 = 0;
-            $xfer += $input->readListBegin($_etype1332, $_size1329);
-            for ($_i1333 = 0; $_i1333 < $_size1329; ++$_i1333)
+            $_size1336 = 0;
+            $_etype1339 = 0;
+            $xfer += $input->readListBegin($_etype1339, $_size1336);
+            for ($_i1340 = 0; $_i1340 < $_size1336; ++$_i1340)
            {
-              $elem1334 = null;
-              $elem1334 = new \metastore\Partition();
-              $xfer += $elem1334->read($input);
-              $this->success []= $elem1334;
+              $elem1341 = null;
+              $elem1341 = new \metastore\Partition();
+              $xfer += $elem1341->read($input);
+              $this->success []= $elem1341;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -33087,9 +33143,9 @@ class ThriftHiveMetastore_get_partitions_result {
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1335)
+          foreach ($this->success as $iter1342)
          {
-            $xfer += $iter1335->write($output);
+            $xfer += $iter1342->write($output);
          }
        }
        $output->writeListEnd();
@@ -33235,14 +33291,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
        case 5:
          if ($ftype == TType::LST) {
            $this->group_names = array();
-            $_size1336 = 0;
-            $_etype1339 = 0;
-            $xfer += $input->readListBegin($_etype1339, $_size1336);
-            for ($_i1340 = 0; $_i1340 < $_size1336; ++$_i1340)
+            $_size1343 = 0;
+            $_etype1346 = 0;
+            $xfer += $input->readListBegin($_etype1346, $_size1343);
+            for ($_i1347 = 0; $_i1347 < $_size1343; ++$_i1347)
            {
-              $elem1341 = null;
-              $xfer += $input->readString($elem1341);
-              $this->group_names []= $elem1341;
+              $elem1348 = null;
+              $xfer += $input->readString($elem1348);
+              $this->group_names []= $elem1348;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -33290,9 +33346,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
      {
        $output->writeListBegin(TType::STRING, count($this->group_names));
        {
-          foreach ($this->group_names as $iter1342)
+          foreach ($this->group_names as $iter1349)
          {
-            $xfer += $output->writeString($iter1342);
+            $xfer += $output->writeString($iter1349);
          }
        }
        $output->writeListEnd();
@@ -33381,15 +33437,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1343 = 0;
-            $_etype1346 = 0;
-            $xfer += $input->readListBegin($_etype1346, $_size1343);
-            for ($_i1347 = 0; $_i1347 < $_size1343; ++$_i1347)
+            $_size1350 = 0;
+            $_etype1353 = 0;
+            $xfer += $input->readListBegin($_etype1353, $_size1350);
+            for ($_i1354 = 0; $_i1354 < $_size1350; ++$_i1354)
            {
-              $elem1348 = null;
-              $elem1348 = new \metastore\Partition();
-              $xfer += $elem1348->read($input);
-              $this->success []= $elem1348;
+              $elem1355 = null;
+              $elem1355 = new \metastore\Partition();
+              $xfer += $elem1355->read($input);
+              $this->success []= $elem1355;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -33433,9 +33489,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1349)
+          foreach ($this->success as $iter1356)
          {
-            $xfer += $iter1349->write($output);
+            $xfer += $iter1356->write($output);
          }
        }
        $output->writeListEnd();
@@ -33655,15 +33711,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1350 = 0;
-            $_etype1353 = 0;
-            $xfer += $input->readListBegin($_etype1353, $_size1350);
-            for ($_i1354 = 0; $_i1354 < $_size1350; ++$_i1354)
+            $_size1357 = 0;
+            $_etype1360 = 0;
+            $xfer += $input->readListBegin($_etype1360, $_size1357);
+            for ($_i1361 = 0; $_i1361 < $_size1357; ++$_i1361)
            {
-              $elem1355 = null;
-              $elem1355 = new \metastore\PartitionSpec();
-              $xfer += $elem1355->read($input);
-              $this->success []= $elem1355;
+              $elem1362 = null;
+              $elem1362 = new \metastore\PartitionSpec();
+              $xfer += $elem1362->read($input);
+              $this->success []= $elem1362;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -33707,9 +33763,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1356)
+          foreach ($this->success as $iter1363)
          {
-            $xfer += $iter1356->write($output);
+            $xfer += $iter1363->write($output);
          }
        }
        $output->writeListEnd();
@@ -33928,14 +33984,14 @@ class ThriftHiveMetastore_get_partition_names_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1357 = 0;
-            $_etype1360 = 0;
-            $xfer += $input->readListBegin($_etype1360, $_size1357);
-            for ($_i1361 = 0; $_i1361 < $_size1357; ++$_i1361)
+            $_size1364 = 0;
+            $_etype1367 = 0;
+            $xfer += $input->readListBegin($_etype1367, $_size1364);
+            for ($_i1368 = 0; $_i1368 < $_size1364; ++$_i1368)
            {
-              $elem1362 = null;
-              $xfer += $input->readString($elem1362);
-              $this->success []= $elem1362;
+              $elem1369 = null;
+              $xfer += $input->readString($elem1369);
+              $this->success []= $elem1369;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -33979,9 +34035,9 @@ class ThriftHiveMetastore_get_partition_names_result {
      {
        $output->writeListBegin(TType::STRING, count($this->success));
        {
-          foreach ($this->success as $iter1363)
+          foreach ($this->success as $iter1370)
          {
-            $xfer += $output->writeString($iter1363);
+            $xfer += $output->writeString($iter1370);
          }
        }
        $output->writeListEnd();
@@ -34312,14 +34368,14 @@ class ThriftHiveMetastore_get_partitions_ps_args {
        case 3:
          if ($ftype == TType::LST) {
            $this->part_vals = array();
-            $_size1364 = 0;
-            $_etype1367 = 0;
-            $xfer += $input->readListBegin($_etype1367, $_size1364);
-            for ($_i1368 = 0; $_i1368 < $_size1364; ++$_i1368)
+            $_size1371 = 0;
+            $_etype1374 = 0;
+            $xfer += $input->readListBegin($_etype1374, $_size1371);
+            for ($_i1375 = 0; $_i1375 < $_size1371; ++$_i1375)
            {
-              $elem1369 = null;
-              $xfer += $input->readString($elem1369);
-              $this->part_vals []= $elem1369;
+              $elem1376 = null;
+              $xfer += $input->readString($elem1376);
+              $this->part_vals []= $elem1376;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -34364,9 +34420,9 @@ class ThriftHiveMetastore_get_partitions_ps_args {
      {
        $output->writeListBegin(TType::STRING, count($this->part_vals));
        {
-          foreach ($this->part_vals as $iter1370)
+          foreach ($this->part_vals as $iter1377)
          {
-            $xfer += $output->writeString($iter1370);
+            $xfer += $output->writeString($iter1377);
          }
        }
        $output->writeListEnd();
@@ -34460,15 +34516,15 @@ class ThriftHiveMetastore_get_partitions_ps_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1371 = 0;
-            $_etype1374 = 0;
-            $xfer += $input->readListBegin($_etype1374, $_size1371);
-            for ($_i1375 = 0; $_i1375 < $_size1371; ++$_i1375)
+            $_size1378 = 0;
+            $_etype1381 = 0;
+            $xfer += $input->readListBegin($_etype1381, $_size1378);
+            for ($_i1382 = 0; $_i1382 < $_size1378; ++$_i1382)
            {
-              $elem1376 = null;
-              $elem1376 = new \metastore\Partition();
-              $xfer += $elem1376->read($input);
-              $this->success []= $elem1376;
+              $elem1383 = null;
+              $elem1383 = new \metastore\Partition();
+              $xfer += $elem1383->read($input);
+              $this->success []= $elem1383;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -34512,9 +34568,9 @@ class ThriftHiveMetastore_get_partitions_ps_result {
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1377)
+          foreach ($this->success as $iter1384)
          {
-            $xfer += $iter1377->write($output);
+            $xfer += $iter1384->write($output);
          }
        }
        $output->writeListEnd();
@@ -34661,14 +34717,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
        case 3:
          if ($ftype == TType::LST) {
            $this->part_vals = array();
-            $_size1378 = 0;
-            $_etype1381 = 0;
-            $xfer += $input->readListBegin($_etype1381, $_size1378);
-            for ($_i1382 = 0; $_i1382 < $_size1378; ++$_i1382)
+            $_size1385 = 0;
+            $_etype1388 = 0;
+            $xfer += $input->readListBegin($_etype1388, $_size1385);
+            for ($_i1389 = 0; $_i1389 < $_size1385; ++$_i1389)
            {
-              $elem1383 = null;
-              $xfer += $input->readString($elem1383);
-              $this->part_vals []= $elem1383;
+              $elem1390 = null;
+              $xfer += $input->readString($elem1390);
+              $this->part_vals []= $elem1390;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -34692,14 +34748,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
        case 6:
          if ($ftype == TType::LST) {
            $this->group_names = array();
-            $_size1384 = 0;
-            $_etype1387 = 0;
-            $xfer += $input->readListBegin($_etype1387, $_size1384);
-            for ($_i1388 = 0; $_i1388 < $_size1384; ++$_i1388)
+            $_size1391 = 0;
+            $_etype1394 = 0;
+            $xfer += $input->readListBegin($_etype1394, $_size1391);
+            for ($_i1395 = 0; $_i1395 < $_size1391; ++$_i1395)
            {
-              $elem1389 = null;
-              $xfer += $input->readString($elem1389);
-              $this->group_names []= $elem1389;
+              $elem1396 = null;
+              $xfer += $input->readString($elem1396);
+              $this->group_names []= $elem1396;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -34737,9 +34793,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
      {
        $output->writeListBegin(TType::STRING, count($this->part_vals));
        {
-          foreach ($this->part_vals as $iter1390)
+          foreach ($this->part_vals as $iter1397)
          {
-            $xfer += $output->writeString($iter1390);
+            $xfer += $output->writeString($iter1397);
          }
        }
        $output->writeListEnd();
@@ -34764,9 +34820,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
      {
        $output->writeListBegin(TType::STRING, count($this->group_names));
        {
-          foreach ($this->group_names as $iter1391)
+          foreach ($this->group_names as $iter1398)
          {
-            $xfer += $output->writeString($iter1391);
+            $xfer += $output->writeString($iter1398);
          }
        }
        $output->writeListEnd();
@@ -34855,15 +34911,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1392 = 0;
-            $_etype1395 = 0;
-            $xfer += $input->readListBegin($_etype1395, $_size1392);
-            for ($_i1396 = 0; $_i1396 < $_size1392; ++$_i1396)
+            $_size1399 = 0;
+            $_etype1402 = 0;
+            $xfer += $input->readListBegin($_etype1402, $_size1399);
+            for ($_i1403 = 0; $_i1403 < $_size1399; ++$_i1403)
            {
-              $elem1397 = null;
-              $elem1397 = new \metastore\Partition();
-              $xfer += $elem1397->read($input);
-              $this->success []= $elem1397;
+              $elem1404 = null;
+              $elem1404 = new \metastore\Partition();
+              $xfer += $elem1404->read($input);
+              $this->success []= $elem1404;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -34907,9 +34963,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1398)
+          foreach ($this->success as $iter1405)
          {
-            $xfer += $iter1398->write($output);
+            $xfer += $iter1405->write($output);
          }
        }
        $output->writeListEnd();
@@ -35030,14 +35086,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
        case 3:
          if ($ftype == TType::LST) {
            $this->part_vals = array();
-            $_size1399 = 0;
-            $_etype1402 = 0;
-            $xfer += $input->readListBegin($_etype1402, $_size1399);
-            for ($_i1403 = 0; $_i1403 < $_size1399; ++$_i1403)
+            $_size1406 = 0;
+            $_etype1409 = 0;
+            $xfer += $input->readListBegin($_etype1409, $_size1406);
+            for ($_i1410 = 0; $_i1410 < $_size1406; ++$_i1410)
            {
-              $elem1404 = null;
-              $xfer += $input->readString($elem1404);
-              $this->part_vals []= $elem1404;
+              $elem1411 = null;
+              $xfer += $input->readString($elem1411);
+              $this->part_vals []= $elem1411;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -35082,9 +35138,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
      {
        $output->writeListBegin(TType::STRING, count($this->part_vals));
        {
-          foreach ($this->part_vals as $iter1405)
+          foreach ($this->part_vals as $iter1412)
          {
-            $xfer += $output->writeString($iter1405);
+            $xfer += $output->writeString($iter1412);
          }
        }
        $output->writeListEnd();
@@ -35177,14 +35233,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1406 = 0;
-            $_etype1409 = 0;
-            $xfer += $input->readListBegin($_etype1409, $_size1406);
-            for ($_i1410 = 0; $_i1410 < $_size1406; ++$_i1410)
+            $_size1413 = 0;
+            $_etype1416 = 0;
+            $xfer += $input->readListBegin($_etype1416, $_size1413);
+            for ($_i1417 = 0; $_i1417 < $_size1413; ++$_i1417)
            {
-              $elem1411 = null;
-              $xfer += $input->readString($elem1411);
-              $this->success []= $elem1411;
+              $elem1418 = null;
+              $xfer += $input->readString($elem1418);
+              $this->success []= $elem1418;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -35228,9 +35284,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
      {
        $output->writeListBegin(TType::STRING, count($this->success));
        {
-          foreach ($this->success as $iter1412)
+          foreach ($this->success as $iter1419)
          {
-            $xfer += $output->writeString($iter1412);
+            $xfer += $output->writeString($iter1419);
          }
        }
        $output->writeListEnd();
@@ -35408,14 +35464,14 @@ class ThriftHiveMetastore_get_partition_names_req_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1413 = 0;
-            $_etype1416 = 0;
-            $xfer += $input->readListBegin($_etype1416, $_size1413);
-            for ($_i1417 = 0; $_i1417 < $_size1413; ++$_i1417)
+            $_size1420 = 0;
+            $_etype1423 = 0;
+            $xfer += $input->readListBegin($_etype1423, $_size1420);
+            for ($_i1424 = 0; $_i1424 < $_size1420; ++$_i1424)
            {
-              $elem1418 = null;
-              $xfer += $input->readString($elem1418);
-              $this->success []= $elem1418;
+              $elem1425 = null;
+              $xfer += $input->readString($elem1425);
+              $this->success []= $elem1425;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -35459,9 +35515,9 @@ class ThriftHiveMetastore_get_partition_names_req_result {
      {
        $output->writeListBegin(TType::STRING, count($this->success));
        {
-          foreach ($this->success as $iter1419)
+          foreach ($this->success as $iter1426)
          {
-            $xfer += $output->writeString($iter1419);
+            $xfer += $output->writeString($iter1426);
          }
        }
        $output->writeListEnd();
@@ -35704,15 +35760,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1420 = 0;
-            $_etype1423 = 0;
-            $xfer += $input->readListBegin($_etype1423, $_size1420);
-            for ($_i1424 = 0; $_i1424 < $_size1420; ++$_i1424)
+            $_size1427 = 0;
+            $_etype1430 = 0;
+            $xfer += $input->readListBegin($_etype1430, $_size1427);
+            for ($_i1431 = 0; $_i1431 < $_size1427; ++$_i1431)
            {
-              $elem1425 = null;
-              $elem1425 = new \metastore\Partition();
-              $xfer += $elem1425->read($input);
-              $this->success []= $elem1425;
+              $elem1432 = null;
+              $elem1432 = new \metastore\Partition();
+              $xfer += $elem1432->read($input);
+              $this->success []= $elem1432;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -35756,9 +35812,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
      {
        $output->writeListBegin(TType::STRUCT, count($this->success));
        {
-          foreach ($this->success as $iter1426)
+          foreach ($this->success as $iter1433)
          {
-            $xfer += $iter1426->write($output);
+            $xfer += $iter1433->write($output);
          }
        }
        $output->writeListEnd();
@@ -36001,15 +36057,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size1427 = 0;
-            $_etype1430 = 0;
-            $xfer += $input->readListBegin($_etype1430, $_size1427);
-            for ($_i1431 = 0; $_i1431 < $_size1427; ++$_i1431)
+            $_size1434 = 0;
+            $_etype1437 = 0;
+            $xfer += $input->readListBegin($_etype1437, $_size1434);
+            for ($_i1438 = 0; $_i1438 < $_size1434; ++$_i1438)
            {
-              $elem1432 = null;
-              $elem1432 = new \metastore\PartitionSpec();
-              $xfer += $elem1432->read($input);
-              $this->success []= $elem1432;
+              $elem1439 = null;
+              $elem1439 = new \metastore\PartitionSpec();
+              $xfer += $elem1439->read($input);
+              $this->success []= $elem1439;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -36053,9 +36109,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
      {
$output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1433) + foreach ($this->success as $iter1440) { - $xfer += $iter1433->write($output); + $xfer += $iter1440->write($output); } } $output->writeListEnd(); @@ -36831,14 +36887,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size1434 = 0; - $_etype1437 = 0; - $xfer += $input->readListBegin($_etype1437, $_size1434); - for ($_i1438 = 0; $_i1438 < $_size1434; ++$_i1438) + $_size1441 = 0; + $_etype1444 = 0; + $xfer += $input->readListBegin($_etype1444, $_size1441); + for ($_i1445 = 0; $_i1445 < $_size1441; ++$_i1445) { - $elem1439 = null; - $xfer += $input->readString($elem1439); - $this->names []= $elem1439; + $elem1446 = null; + $xfer += $input->readString($elem1446); + $this->names []= $elem1446; } $xfer += $input->readListEnd(); } else { @@ -36876,9 +36932,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter1440) + foreach ($this->names as $iter1447) { - $xfer += $output->writeString($iter1440); + $xfer += $output->writeString($iter1447); } } $output->writeListEnd(); @@ -36967,15 +37023,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1441 = 0; - $_etype1444 = 0; - $xfer += $input->readListBegin($_etype1444, $_size1441); - for ($_i1445 = 0; $_i1445 < $_size1441; ++$_i1445) + $_size1448 = 0; + $_etype1451 = 0; + $xfer += $input->readListBegin($_etype1451, $_size1448); + for ($_i1452 = 0; $_i1452 < $_size1448; ++$_i1452) { - $elem1446 = null; - $elem1446 = new \metastore\Partition(); - $xfer += $elem1446->read($input); - $this->success []= $elem1446; + $elem1453 = null; + $elem1453 = new \metastore\Partition(); + $xfer += $elem1453->read($input); + $this->success []= $elem1453; } $xfer += $input->readListEnd(); } else { @@ -37019,9 +37075,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1447) + foreach ($this->success as $iter1454) { - $xfer += $iter1447->write($output); + $xfer += $iter1454->write($output); } } $output->writeListEnd(); @@ -37570,15 +37626,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1448 = 0; - $_etype1451 = 0; - $xfer += $input->readListBegin($_etype1451, $_size1448); - for ($_i1452 = 0; $_i1452 < $_size1448; ++$_i1452) + $_size1455 = 0; + $_etype1458 = 0; + $xfer += $input->readListBegin($_etype1458, $_size1455); + for ($_i1459 = 0; $_i1459 < $_size1455; ++$_i1459) { - $elem1453 = null; - $elem1453 = new \metastore\Partition(); - $xfer += $elem1453->read($input); - $this->new_parts []= $elem1453; + $elem1460 = null; + $elem1460 = new \metastore\Partition(); + $xfer += $elem1460->read($input); + $this->new_parts []= $elem1460; } $xfer += $input->readListEnd(); } else { @@ -37616,9 +37672,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1454) + foreach ($this->new_parts as $iter1461) { - $xfer += $iter1454->write($output); + $xfer += $iter1461->write($output); } } $output->writeListEnd(); @@ -37833,15 +37889,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { case 3: if ($ftype == 
TType::LST) { $this->new_parts = array(); - $_size1455 = 0; - $_etype1458 = 0; - $xfer += $input->readListBegin($_etype1458, $_size1455); - for ($_i1459 = 0; $_i1459 < $_size1455; ++$_i1459) + $_size1462 = 0; + $_etype1465 = 0; + $xfer += $input->readListBegin($_etype1465, $_size1462); + for ($_i1466 = 0; $_i1466 < $_size1462; ++$_i1466) { - $elem1460 = null; - $elem1460 = new \metastore\Partition(); - $xfer += $elem1460->read($input); - $this->new_parts []= $elem1460; + $elem1467 = null; + $elem1467 = new \metastore\Partition(); + $xfer += $elem1467->read($input); + $this->new_parts []= $elem1467; } $xfer += $input->readListEnd(); } else { @@ -37887,9 +37943,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1461) + foreach ($this->new_parts as $iter1468) { - $xfer += $iter1461->write($output); + $xfer += $iter1468->write($output); } } $output->writeListEnd(); @@ -38577,14 +38633,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1462 = 0; - $_etype1465 = 0; - $xfer += $input->readListBegin($_etype1465, $_size1462); - for ($_i1466 = 0; $_i1466 < $_size1462; ++$_i1466) + $_size1469 = 0; + $_etype1472 = 0; + $xfer += $input->readListBegin($_etype1472, $_size1469); + for ($_i1473 = 0; $_i1473 < $_size1469; ++$_i1473) { - $elem1467 = null; - $xfer += $input->readString($elem1467); - $this->part_vals []= $elem1467; + $elem1474 = null; + $xfer += $input->readString($elem1474); + $this->part_vals []= $elem1474; } $xfer += $input->readListEnd(); } else { @@ -38630,9 +38686,9 @@ class ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1468) + foreach ($this->part_vals as $iter1475) { - $xfer += $output->writeString($iter1468); + $xfer += $output->writeString($iter1475); } } $output->writeListEnd(); @@ -39027,14 +39083,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1469 = 0; - $_etype1472 = 0; - $xfer += $input->readListBegin($_etype1472, $_size1469); - for ($_i1473 = 0; $_i1473 < $_size1469; ++$_i1473) + $_size1476 = 0; + $_etype1479 = 0; + $xfer += $input->readListBegin($_etype1479, $_size1476); + for ($_i1480 = 0; $_i1480 < $_size1476; ++$_i1480) { - $elem1474 = null; - $xfer += $input->readString($elem1474); - $this->part_vals []= $elem1474; + $elem1481 = null; + $xfer += $input->readString($elem1481); + $this->part_vals []= $elem1481; } $xfer += $input->readListEnd(); } else { @@ -39069,9 +39125,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1475) + foreach ($this->part_vals as $iter1482) { - $xfer += $output->writeString($iter1475); + $xfer += $output->writeString($iter1482); } } $output->writeListEnd(); @@ -39525,14 +39581,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1476 = 0; - $_etype1479 = 0; - $xfer += $input->readListBegin($_etype1479, $_size1476); - for ($_i1480 = 0; $_i1480 < $_size1476; ++$_i1480) + $_size1483 = 0; + $_etype1486 = 0; + $xfer += $input->readListBegin($_etype1486, $_size1483); + for ($_i1487 = 0; $_i1487 < $_size1483; ++$_i1487) { - $elem1481 = null; 
- $xfer += $input->readString($elem1481); - $this->success []= $elem1481; + $elem1488 = null; + $xfer += $input->readString($elem1488); + $this->success []= $elem1488; } $xfer += $input->readListEnd(); } else { @@ -39568,9 +39624,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1482) + foreach ($this->success as $iter1489) { - $xfer += $output->writeString($iter1482); + $xfer += $output->writeString($iter1489); } } $output->writeListEnd(); @@ -39730,17 +39786,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1483 = 0; - $_ktype1484 = 0; - $_vtype1485 = 0; - $xfer += $input->readMapBegin($_ktype1484, $_vtype1485, $_size1483); - for ($_i1487 = 0; $_i1487 < $_size1483; ++$_i1487) + $_size1490 = 0; + $_ktype1491 = 0; + $_vtype1492 = 0; + $xfer += $input->readMapBegin($_ktype1491, $_vtype1492, $_size1490); + for ($_i1494 = 0; $_i1494 < $_size1490; ++$_i1494) { - $key1488 = ''; - $val1489 = ''; - $xfer += $input->readString($key1488); - $xfer += $input->readString($val1489); - $this->success[$key1488] = $val1489; + $key1495 = ''; + $val1496 = ''; + $xfer += $input->readString($key1495); + $xfer += $input->readString($val1496); + $this->success[$key1495] = $val1496; } $xfer += $input->readMapEnd(); } else { @@ -39776,10 +39832,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter1490 => $viter1491) + foreach ($this->success as $kiter1497 => $viter1498) { - $xfer += $output->writeString($kiter1490); - $xfer += $output->writeString($viter1491); + $xfer += $output->writeString($kiter1497); + $xfer += $output->writeString($viter1498); } } $output->writeMapEnd(); @@ -39899,17 +39955,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1492 = 0; - $_ktype1493 = 0; - $_vtype1494 = 0; - $xfer += $input->readMapBegin($_ktype1493, $_vtype1494, $_size1492); - for ($_i1496 = 0; $_i1496 < $_size1492; ++$_i1496) + $_size1499 = 0; + $_ktype1500 = 0; + $_vtype1501 = 0; + $xfer += $input->readMapBegin($_ktype1500, $_vtype1501, $_size1499); + for ($_i1503 = 0; $_i1503 < $_size1499; ++$_i1503) { - $key1497 = ''; - $val1498 = ''; - $xfer += $input->readString($key1497); - $xfer += $input->readString($val1498); - $this->part_vals[$key1497] = $val1498; + $key1504 = ''; + $val1505 = ''; + $xfer += $input->readString($key1504); + $xfer += $input->readString($val1505); + $this->part_vals[$key1504] = $val1505; } $xfer += $input->readMapEnd(); } else { @@ -39954,10 +40010,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1499 => $viter1500) + foreach ($this->part_vals as $kiter1506 => $viter1507) { - $xfer += $output->writeString($kiter1499); - $xfer += $output->writeString($viter1500); + $xfer += $output->writeString($kiter1506); + $xfer += $output->writeString($viter1507); } } $output->writeMapEnd(); @@ -40279,17 +40335,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1501 = 0; - $_ktype1502 = 0; - $_vtype1503 = 0; - $xfer += $input->readMapBegin($_ktype1502, $_vtype1503, $_size1501); - for ($_i1505 = 0; 
$_i1505 < $_size1501; ++$_i1505) + $_size1508 = 0; + $_ktype1509 = 0; + $_vtype1510 = 0; + $xfer += $input->readMapBegin($_ktype1509, $_vtype1510, $_size1508); + for ($_i1512 = 0; $_i1512 < $_size1508; ++$_i1512) { - $key1506 = ''; - $val1507 = ''; - $xfer += $input->readString($key1506); - $xfer += $input->readString($val1507); - $this->part_vals[$key1506] = $val1507; + $key1513 = ''; + $val1514 = ''; + $xfer += $input->readString($key1513); + $xfer += $input->readString($val1514); + $this->part_vals[$key1513] = $val1514; } $xfer += $input->readMapEnd(); } else { @@ -40334,10 +40390,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1508 => $viter1509) + foreach ($this->part_vals as $kiter1515 => $viter1516) { - $xfer += $output->writeString($kiter1508); - $xfer += $output->writeString($viter1509); + $xfer += $output->writeString($kiter1515); + $xfer += $output->writeString($viter1516); } } $output->writeMapEnd(); @@ -45862,14 +45918,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1510 = 0; - $_etype1513 = 0; - $xfer += $input->readListBegin($_etype1513, $_size1510); - for ($_i1514 = 0; $_i1514 < $_size1510; ++$_i1514) + $_size1517 = 0; + $_etype1520 = 0; + $xfer += $input->readListBegin($_etype1520, $_size1517); + for ($_i1521 = 0; $_i1521 < $_size1517; ++$_i1521) { - $elem1515 = null; - $xfer += $input->readString($elem1515); - $this->success []= $elem1515; + $elem1522 = null; + $xfer += $input->readString($elem1522); + $this->success []= $elem1522; } $xfer += $input->readListEnd(); } else { @@ -45905,9 +45961,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1516) + foreach ($this->success as $iter1523) { - $xfer += $output->writeString($iter1516); + $xfer += $output->writeString($iter1523); } } $output->writeListEnd(); @@ -46776,14 +46832,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1517 = 0; - $_etype1520 = 0; - $xfer += $input->readListBegin($_etype1520, $_size1517); - for ($_i1521 = 0; $_i1521 < $_size1517; ++$_i1521) + $_size1524 = 0; + $_etype1527 = 0; + $xfer += $input->readListBegin($_etype1527, $_size1524); + for ($_i1528 = 0; $_i1528 < $_size1524; ++$_i1528) { - $elem1522 = null; - $xfer += $input->readString($elem1522); - $this->success []= $elem1522; + $elem1529 = null; + $xfer += $input->readString($elem1529); + $this->success []= $elem1529; } $xfer += $input->readListEnd(); } else { @@ -46819,9 +46875,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1523) + foreach ($this->success as $iter1530) { - $xfer += $output->writeString($iter1523); + $xfer += $output->writeString($iter1530); } } $output->writeListEnd(); @@ -47512,15 +47568,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1524 = 0; - $_etype1527 = 0; - $xfer += $input->readListBegin($_etype1527, $_size1524); - for ($_i1528 = 0; $_i1528 < $_size1524; ++$_i1528) + $_size1531 = 0; + $_etype1534 = 0; + $xfer += $input->readListBegin($_etype1534, $_size1531); + for ($_i1535 = 0; $_i1535 < $_size1531; ++$_i1535) { - $elem1529 = null; - $elem1529 = new 
\metastore\Role(); - $xfer += $elem1529->read($input); - $this->success []= $elem1529; + $elem1536 = null; + $elem1536 = new \metastore\Role(); + $xfer += $elem1536->read($input); + $this->success []= $elem1536; } $xfer += $input->readListEnd(); } else { @@ -47556,9 +47612,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1530) + foreach ($this->success as $iter1537) { - $xfer += $iter1530->write($output); + $xfer += $iter1537->write($output); } } $output->writeListEnd(); @@ -48220,14 +48276,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1531 = 0; - $_etype1534 = 0; - $xfer += $input->readListBegin($_etype1534, $_size1531); - for ($_i1535 = 0; $_i1535 < $_size1531; ++$_i1535) + $_size1538 = 0; + $_etype1541 = 0; + $xfer += $input->readListBegin($_etype1541, $_size1538); + for ($_i1542 = 0; $_i1542 < $_size1538; ++$_i1542) { - $elem1536 = null; - $xfer += $input->readString($elem1536); - $this->group_names []= $elem1536; + $elem1543 = null; + $xfer += $input->readString($elem1543); + $this->group_names []= $elem1543; } $xfer += $input->readListEnd(); } else { @@ -48268,9 +48324,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1537) + foreach ($this->group_names as $iter1544) { - $xfer += $output->writeString($iter1537); + $xfer += $output->writeString($iter1544); } } $output->writeListEnd(); @@ -48578,15 +48634,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1538 = 0; - $_etype1541 = 0; - $xfer += $input->readListBegin($_etype1541, $_size1538); - for ($_i1542 = 0; $_i1542 < $_size1538; ++$_i1542) + $_size1545 = 0; + $_etype1548 = 0; + $xfer += $input->readListBegin($_etype1548, $_size1545); + for ($_i1549 = 0; $_i1549 < $_size1545; ++$_i1549) { - $elem1543 = null; - $elem1543 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem1543->read($input); - $this->success []= $elem1543; + $elem1550 = null; + $elem1550 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem1550->read($input); + $this->success []= $elem1550; } $xfer += $input->readListEnd(); } else { @@ -48622,9 +48678,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1544) + foreach ($this->success as $iter1551) { - $xfer += $iter1544->write($output); + $xfer += $iter1551->write($output); } } $output->writeListEnd(); @@ -49492,14 +49548,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1545 = 0; - $_etype1548 = 0; - $xfer += $input->readListBegin($_etype1548, $_size1545); - for ($_i1549 = 0; $_i1549 < $_size1545; ++$_i1549) + $_size1552 = 0; + $_etype1555 = 0; + $xfer += $input->readListBegin($_etype1555, $_size1552); + for ($_i1556 = 0; $_i1556 < $_size1552; ++$_i1556) { - $elem1550 = null; - $xfer += $input->readString($elem1550); - $this->group_names []= $elem1550; + $elem1557 = null; + $xfer += $input->readString($elem1557); + $this->group_names []= $elem1557; } $xfer += $input->readListEnd(); } else { @@ -49532,9 +49588,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as 
$iter1551) + foreach ($this->group_names as $iter1558) { - $xfer += $output->writeString($iter1551); + $xfer += $output->writeString($iter1558); } } $output->writeListEnd(); @@ -49610,14 +49666,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1552 = 0; - $_etype1555 = 0; - $xfer += $input->readListBegin($_etype1555, $_size1552); - for ($_i1556 = 0; $_i1556 < $_size1552; ++$_i1556) + $_size1559 = 0; + $_etype1562 = 0; + $xfer += $input->readListBegin($_etype1562, $_size1559); + for ($_i1563 = 0; $_i1563 < $_size1559; ++$_i1563) { - $elem1557 = null; - $xfer += $input->readString($elem1557); - $this->success []= $elem1557; + $elem1564 = null; + $xfer += $input->readString($elem1564); + $this->success []= $elem1564; } $xfer += $input->readListEnd(); } else { @@ -49653,9 +49709,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1558) + foreach ($this->success as $iter1565) { - $xfer += $output->writeString($iter1558); + $xfer += $output->writeString($iter1565); } } $output->writeListEnd(); @@ -50772,14 +50828,14 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1559 = 0; - $_etype1562 = 0; - $xfer += $input->readListBegin($_etype1562, $_size1559); - for ($_i1563 = 0; $_i1563 < $_size1559; ++$_i1563) + $_size1566 = 0; + $_etype1569 = 0; + $xfer += $input->readListBegin($_etype1569, $_size1566); + for ($_i1570 = 0; $_i1570 < $_size1566; ++$_i1570) { - $elem1564 = null; - $xfer += $input->readString($elem1564); - $this->success []= $elem1564; + $elem1571 = null; + $xfer += $input->readString($elem1571); + $this->success []= $elem1571; } $xfer += $input->readListEnd(); } else { @@ -50807,9 +50863,9 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1565) + foreach ($this->success as $iter1572) { - $xfer += $output->writeString($iter1565); + $xfer += $output->writeString($iter1572); } } $output->writeListEnd(); @@ -51448,14 +51504,14 @@ class ThriftHiveMetastore_get_master_keys_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1566 = 0; - $_etype1569 = 0; - $xfer += $input->readListBegin($_etype1569, $_size1566); - for ($_i1570 = 0; $_i1570 < $_size1566; ++$_i1570) + $_size1573 = 0; + $_etype1576 = 0; + $xfer += $input->readListBegin($_etype1576, $_size1573); + for ($_i1577 = 0; $_i1577 < $_size1573; ++$_i1577) { - $elem1571 = null; - $xfer += $input->readString($elem1571); - $this->success []= $elem1571; + $elem1578 = null; + $xfer += $input->readString($elem1578); + $this->success []= $elem1578; } $xfer += $input->readListEnd(); } else { @@ -51483,9 +51539,9 @@ class ThriftHiveMetastore_get_master_keys_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1572) + foreach ($this->success as $iter1579) { - $xfer += $output->writeString($iter1572); + $xfer += $output->writeString($iter1579); } } $output->writeListEnd(); @@ -55239,14 +55295,14 @@ class ThriftHiveMetastore_find_columns_with_stats_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1573 = 0; - $_etype1576 = 0; - $xfer += $input->readListBegin($_etype1576, $_size1573); - for ($_i1577 = 0; $_i1577 < $_size1573; ++$_i1577) + $_size1580 = 0; + $_etype1583 = 0; + $xfer += 
$input->readListBegin($_etype1583, $_size1580); + for ($_i1584 = 0; $_i1584 < $_size1580; ++$_i1584) { - $elem1578 = null; - $xfer += $input->readString($elem1578); - $this->success []= $elem1578; + $elem1585 = null; + $xfer += $input->readString($elem1585); + $this->success []= $elem1585; } $xfer += $input->readListEnd(); } else { @@ -55274,9 +55330,9 @@ class ThriftHiveMetastore_find_columns_with_stats_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1579) + foreach ($this->success as $iter1586) { - $xfer += $output->writeString($iter1579); + $xfer += $output->writeString($iter1586); } } $output->writeListEnd(); @@ -63447,15 +63503,15 @@ class ThriftHiveMetastore_get_schema_all_versions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1580 = 0; - $_etype1583 = 0; - $xfer += $input->readListBegin($_etype1583, $_size1580); - for ($_i1584 = 0; $_i1584 < $_size1580; ++$_i1584) + $_size1587 = 0; + $_etype1590 = 0; + $xfer += $input->readListBegin($_etype1590, $_size1587); + for ($_i1591 = 0; $_i1591 < $_size1587; ++$_i1591) { - $elem1585 = null; - $elem1585 = new \metastore\SchemaVersion(); - $xfer += $elem1585->read($input); - $this->success []= $elem1585; + $elem1592 = null; + $elem1592 = new \metastore\SchemaVersion(); + $xfer += $elem1592->read($input); + $this->success []= $elem1592; } $xfer += $input->readListEnd(); } else { @@ -63499,9 +63555,9 @@ class ThriftHiveMetastore_get_schema_all_versions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1586) + foreach ($this->success as $iter1593) { - $xfer += $iter1586->write($output); + $xfer += $iter1593->write($output); } } $output->writeListEnd(); @@ -65370,15 +65426,15 @@ class ThriftHiveMetastore_get_runtime_stats_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1587 = 0; - $_etype1590 = 0; - $xfer += $input->readListBegin($_etype1590, $_size1587); - for ($_i1591 = 0; $_i1591 < $_size1587; ++$_i1591) + $_size1594 = 0; + $_etype1597 = 0; + $xfer += $input->readListBegin($_etype1597, $_size1594); + for ($_i1598 = 0; $_i1598 < $_size1594; ++$_i1598) { - $elem1592 = null; - $elem1592 = new \metastore\RuntimeStat(); - $xfer += $elem1592->read($input); - $this->success []= $elem1592; + $elem1599 = null; + $elem1599 = new \metastore\RuntimeStat(); + $xfer += $elem1599->read($input); + $this->success []= $elem1599; } $xfer += $input->readListEnd(); } else { @@ -65414,9 +65470,9 @@ class ThriftHiveMetastore_get_runtime_stats_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1593) + foreach ($this->success as $iter1600) { - $xfer += $iter1593->write($output); + $xfer += $iter1600->write($output); } } $output->writeListEnd(); @@ -66429,4 +66485,161 @@ class ThriftHiveMetastore_get_scheduled_query_result { } +class ThriftHiveMetastore_add_replication_metrics_args { + static $_TSPEC; + + /** + * @var \metastore\ReplicationMetricList + */ + public $replicationMetricList = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'replicationMetricList', + 'type' => TType::STRUCT, + 'class' => '\metastore\ReplicationMetricList', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['replicationMetricList'])) { + $this->replicationMetricList = $vals['replicationMetricList']; + } + } + } + + public function getName() { + return 
'ThriftHiveMetastore_add_replication_metrics_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->replicationMetricList = new \metastore\ReplicationMetricList(); + $xfer += $this->replicationMetricList->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_replication_metrics_args'); + if ($this->replicationMetricList !== null) { + if (!is_object($this->replicationMetricList)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('replicationMetricList', TType::STRUCT, 1); + $xfer += $this->replicationMetricList->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_add_replication_metrics_result { + static $_TSPEC; + + /** + * @var \metastore\MetaException + */ + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_add_replication_metrics_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_replication_metrics_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php index b1f99036a0..7194ee8bd0 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php @@ -36390,6 +36390,276 @@ class ScheduledQueryProgressInfo { } +class ReplicationMetrics { + static $_TSPEC; + + /** + * @var int + */ + public $scheduledExecutionId = null; + /** + * @var string + */ + public $policy = null; + /** + * @var int + */ + public 
$dumpExecutionId = null; + /** + * @var string + */ + public $metadata = null; + /** + * @var string + */ + public $progress = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'scheduledExecutionId', + 'type' => TType::I64, + ), + 2 => array( + 'var' => 'policy', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'dumpExecutionId', + 'type' => TType::I64, + ), + 4 => array( + 'var' => 'metadata', + 'type' => TType::STRING, + ), + 5 => array( + 'var' => 'progress', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['scheduledExecutionId'])) { + $this->scheduledExecutionId = $vals['scheduledExecutionId']; + } + if (isset($vals['policy'])) { + $this->policy = $vals['policy']; + } + if (isset($vals['dumpExecutionId'])) { + $this->dumpExecutionId = $vals['dumpExecutionId']; + } + if (isset($vals['metadata'])) { + $this->metadata = $vals['metadata']; + } + if (isset($vals['progress'])) { + $this->progress = $vals['progress']; + } + } + } + + public function getName() { + return 'ReplicationMetrics'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->scheduledExecutionId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->policy); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->dumpExecutionId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->metadata); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->progress); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ReplicationMetrics'); + if ($this->scheduledExecutionId !== null) { + $xfer += $output->writeFieldBegin('scheduledExecutionId', TType::I64, 1); + $xfer += $output->writeI64($this->scheduledExecutionId); + $xfer += $output->writeFieldEnd(); + } + if ($this->policy !== null) { + $xfer += $output->writeFieldBegin('policy', TType::STRING, 2); + $xfer += $output->writeString($this->policy); + $xfer += $output->writeFieldEnd(); + } + if ($this->dumpExecutionId !== null) { + $xfer += $output->writeFieldBegin('dumpExecutionId', TType::I64, 3); + $xfer += $output->writeI64($this->dumpExecutionId); + $xfer += $output->writeFieldEnd(); + } + if ($this->metadata !== null) { + $xfer += $output->writeFieldBegin('metadata', TType::STRING, 4); + $xfer += $output->writeString($this->metadata); + $xfer += $output->writeFieldEnd(); + } + if ($this->progress !== null) { + $xfer += $output->writeFieldBegin('progress', TType::STRING, 5); + $xfer += $output->writeString($this->progress); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ReplicationMetricList { + 
static $_TSPEC; + + /** + * @var \metastore\ReplicationMetrics[] + */ + public $replicationMetricList = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'replicationMetricList', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\ReplicationMetrics', + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['replicationMetricList'])) { + $this->replicationMetricList = $vals['replicationMetricList']; + } + } + } + + public function getName() { + return 'ReplicationMetricList'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->replicationMetricList = array(); + $_size1008 = 0; + $_etype1011 = 0; + $xfer += $input->readListBegin($_etype1011, $_size1008); + for ($_i1012 = 0; $_i1012 < $_size1008; ++$_i1012) + { + $elem1013 = null; + $elem1013 = new \metastore\ReplicationMetrics(); + $xfer += $elem1013->read($input); + $this->replicationMetricList []= $elem1013; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ReplicationMetricList'); + if ($this->replicationMetricList !== null) { + if (!is_array($this->replicationMetricList)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('replicationMetricList', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->replicationMetricList)); + { + foreach ($this->replicationMetricList as $iter1014) + { + $xfer += $iter1014->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class AlterPartitionsRequest { static $_TSPEC; @@ -36529,15 +36799,15 @@ class AlterPartitionsRequest { case 4: if ($ftype == TType::LST) { $this->partitions = array(); - $_size1008 = 0; - $_etype1011 = 0; - $xfer += $input->readListBegin($_etype1011, $_size1008); - for ($_i1012 = 0; $_i1012 < $_size1008; ++$_i1012) + $_size1015 = 0; + $_etype1018 = 0; + $xfer += $input->readListBegin($_etype1018, $_size1015); + for ($_i1019 = 0; $_i1019 < $_size1015; ++$_i1019) { - $elem1013 = null; - $elem1013 = new \metastore\Partition(); - $xfer += $elem1013->read($input); - $this->partitions []= $elem1013; + $elem1020 = null; + $elem1020 = new \metastore\Partition(); + $xfer += $elem1020->read($input); + $this->partitions []= $elem1020; } $xfer += $input->readListEnd(); } else { @@ -36602,9 +36872,9 @@ class AlterPartitionsRequest { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter1014) + foreach ($this->partitions as $iter1021) { - $xfer += $iter1014->write($output); + $xfer += $iter1021->write($output); } } $output->writeListEnd(); @@ -36813,14 +37083,14 @@ class RenamePartitionRequest { case 4: if ($ftype == TType::LST) { $this->partVals = array(); - $_size1015 = 0; - $_etype1018 = 
0; - $xfer += $input->readListBegin($_etype1018, $_size1015); - for ($_i1019 = 0; $_i1019 < $_size1015; ++$_i1019) + $_size1022 = 0; + $_etype1025 = 0; + $xfer += $input->readListBegin($_etype1025, $_size1022); + for ($_i1026 = 0; $_i1026 < $_size1022; ++$_i1026) { - $elem1020 = null; - $xfer += $input->readString($elem1020); - $this->partVals []= $elem1020; + $elem1027 = null; + $xfer += $input->readString($elem1027); + $this->partVals []= $elem1027; } $xfer += $input->readListEnd(); } else { @@ -36878,9 +37148,9 @@ class RenamePartitionRequest { { $output->writeListBegin(TType::STRING, count($this->partVals)); { - foreach ($this->partVals as $iter1021) + foreach ($this->partVals as $iter1028) { - $xfer += $output->writeString($iter1021); + $xfer += $output->writeString($iter1028); } } $output->writeListEnd(); @@ -37148,14 +37418,14 @@ class AlterTableRequest { case 8: if ($ftype == TType::LST) { $this->processorCapabilities = array(); - $_size1022 = 0; - $_etype1025 = 0; - $xfer += $input->readListBegin($_etype1025, $_size1022); - for ($_i1026 = 0; $_i1026 < $_size1022; ++$_i1026) + $_size1029 = 0; + $_etype1032 = 0; + $xfer += $input->readListBegin($_etype1032, $_size1029); + for ($_i1033 = 0; $_i1033 < $_size1029; ++$_i1033) { - $elem1027 = null; - $xfer += $input->readString($elem1027); - $this->processorCapabilities []= $elem1027; + $elem1034 = null; + $xfer += $input->readString($elem1034); + $this->processorCapabilities []= $elem1034; } $xfer += $input->readListEnd(); } else { @@ -37231,9 +37501,9 @@ class AlterTableRequest { { $output->writeListBegin(TType::STRING, count($this->processorCapabilities)); { - foreach ($this->processorCapabilities as $iter1028) + foreach ($this->processorCapabilities as $iter1035) { - $xfer += $output->writeString($iter1028); + $xfer += $output->writeString($iter1035); } } $output->writeListEnd(); @@ -37374,14 +37644,14 @@ class GetPartitionsProjectionSpec { case 1: if ($ftype == TType::LST) { $this->fieldList = array(); - $_size1029 = 0; - $_etype1032 = 0; - $xfer += $input->readListBegin($_etype1032, $_size1029); - for ($_i1033 = 0; $_i1033 < $_size1029; ++$_i1033) + $_size1036 = 0; + $_etype1039 = 0; + $xfer += $input->readListBegin($_etype1039, $_size1036); + for ($_i1040 = 0; $_i1040 < $_size1036; ++$_i1040) { - $elem1034 = null; - $xfer += $input->readString($elem1034); - $this->fieldList []= $elem1034; + $elem1041 = null; + $xfer += $input->readString($elem1041); + $this->fieldList []= $elem1041; } $xfer += $input->readListEnd(); } else { @@ -37423,9 +37693,9 @@ class GetPartitionsProjectionSpec { { $output->writeListBegin(TType::STRING, count($this->fieldList)); { - foreach ($this->fieldList as $iter1035) + foreach ($this->fieldList as $iter1042) { - $xfer += $output->writeString($iter1035); + $xfer += $output->writeString($iter1042); } } $output->writeListEnd(); @@ -37517,14 +37787,14 @@ class GetPartitionsFilterSpec { case 8: if ($ftype == TType::LST) { $this->filters = array(); - $_size1036 = 0; - $_etype1039 = 0; - $xfer += $input->readListBegin($_etype1039, $_size1036); - for ($_i1040 = 0; $_i1040 < $_size1036; ++$_i1040) + $_size1043 = 0; + $_etype1046 = 0; + $xfer += $input->readListBegin($_etype1046, $_size1043); + for ($_i1047 = 0; $_i1047 < $_size1043; ++$_i1047) { - $elem1041 = null; - $xfer += $input->readString($elem1041); - $this->filters []= $elem1041; + $elem1048 = null; + $xfer += $input->readString($elem1048); + $this->filters []= $elem1048; } $xfer += $input->readListEnd(); } else { @@ -37557,9 +37827,9 @@ class 
GetPartitionsFilterSpec { { $output->writeListBegin(TType::STRING, count($this->filters)); { - foreach ($this->filters as $iter1042) + foreach ($this->filters as $iter1049) { - $xfer += $output->writeString($iter1042); + $xfer += $output->writeString($iter1049); } } $output->writeListEnd(); @@ -37624,15 +37894,15 @@ class GetPartitionsResponse { case 1: if ($ftype == TType::LST) { $this->partitionSpec = array(); - $_size1043 = 0; - $_etype1046 = 0; - $xfer += $input->readListBegin($_etype1046, $_size1043); - for ($_i1047 = 0; $_i1047 < $_size1043; ++$_i1047) + $_size1050 = 0; + $_etype1053 = 0; + $xfer += $input->readListBegin($_etype1053, $_size1050); + for ($_i1054 = 0; $_i1054 < $_size1050; ++$_i1054) { - $elem1048 = null; - $elem1048 = new \metastore\PartitionSpec(); - $xfer += $elem1048->read($input); - $this->partitionSpec []= $elem1048; + $elem1055 = null; + $elem1055 = new \metastore\PartitionSpec(); + $xfer += $elem1055->read($input); + $this->partitionSpec []= $elem1055; } $xfer += $input->readListEnd(); } else { @@ -37660,9 +37930,9 @@ class GetPartitionsResponse { { $output->writeListBegin(TType::STRUCT, count($this->partitionSpec)); { - foreach ($this->partitionSpec as $iter1049) + foreach ($this->partitionSpec as $iter1056) { - $xfer += $iter1049->write($output); + $xfer += $iter1056->write($output); } } $output->writeListEnd(); @@ -37866,14 +38136,14 @@ class GetPartitionsRequest { case 6: if ($ftype == TType::LST) { $this->groupNames = array(); - $_size1050 = 0; - $_etype1053 = 0; - $xfer += $input->readListBegin($_etype1053, $_size1050); - for ($_i1054 = 0; $_i1054 < $_size1050; ++$_i1054) + $_size1057 = 0; + $_etype1060 = 0; + $xfer += $input->readListBegin($_etype1060, $_size1057); + for ($_i1061 = 0; $_i1061 < $_size1057; ++$_i1061) { - $elem1055 = null; - $xfer += $input->readString($elem1055); - $this->groupNames []= $elem1055; + $elem1062 = null; + $xfer += $input->readString($elem1062); + $this->groupNames []= $elem1062; } $xfer += $input->readListEnd(); } else { @@ -37899,14 +38169,14 @@ class GetPartitionsRequest { case 9: if ($ftype == TType::LST) { $this->processorCapabilities = array(); - $_size1056 = 0; - $_etype1059 = 0; - $xfer += $input->readListBegin($_etype1059, $_size1056); - for ($_i1060 = 0; $_i1060 < $_size1056; ++$_i1060) + $_size1063 = 0; + $_etype1066 = 0; + $xfer += $input->readListBegin($_etype1066, $_size1063); + for ($_i1067 = 0; $_i1067 < $_size1063; ++$_i1067) { - $elem1061 = null; - $xfer += $input->readString($elem1061); - $this->processorCapabilities []= $elem1061; + $elem1068 = null; + $xfer += $input->readString($elem1068); + $this->processorCapabilities []= $elem1068; } $xfer += $input->readListEnd(); } else { @@ -37966,9 +38236,9 @@ class GetPartitionsRequest { { $output->writeListBegin(TType::STRING, count($this->groupNames)); { - foreach ($this->groupNames as $iter1062) + foreach ($this->groupNames as $iter1069) { - $xfer += $output->writeString($iter1062); + $xfer += $output->writeString($iter1069); } } $output->writeListEnd(); @@ -37999,9 +38269,9 @@ class GetPartitionsRequest { { $output->writeListBegin(TType::STRING, count($this->processorCapabilities)); { - foreach ($this->processorCapabilities as $iter1063) + foreach ($this->processorCapabilities as $iter1070) { - $xfer += $output->writeString($iter1063); + $xfer += $output->writeString($iter1070); } } $output->writeListEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index 100a62206e..af59d994e6 100755 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -257,6 +257,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' void scheduled_query_maintenance(ScheduledQueryMaintenanceRequest request)') print(' void scheduled_query_progress(ScheduledQueryProgressInfo info)') print(' ScheduledQuery get_scheduled_query(ScheduledQueryKey scheduleKey)') + print(' void add_replication_metrics(ReplicationMetricList replicationMetricList)') print(' string getName()') print(' string getVersion()') print(' fb_status getStatus()') @@ -1724,6 +1725,12 @@ elif cmd == 'get_scheduled_query': sys.exit(1) pp.pprint(client.get_scheduled_query(eval(args[0]),)) +elif cmd == 'add_replication_metrics': + if len(args) != 1: + print('add_replication_metrics requires 1 args') + sys.exit(1) + pp.pprint(client.add_replication_metrics(eval(args[0]),)) + elif cmd == 'getName': if len(args) != 0: print('getName requires 0 args') diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index a892606e61..653e2da821 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -1785,6 +1785,13 @@ def get_scheduled_query(self, scheduleKey): """ pass + def add_replication_metrics(self, replicationMetricList): + """ + Parameters: + - replicationMetricList + """ + pass + class Client(fb303.FacebookService.Client, Iface): """ @@ -10062,6 +10069,37 @@ def recv_get_scheduled_query(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_scheduled_query failed: unknown result") + def add_replication_metrics(self, replicationMetricList): + """ + Parameters: + - replicationMetricList + """ + self.send_add_replication_metrics(replicationMetricList) + self.recv_add_replication_metrics() + + def send_add_replication_metrics(self, replicationMetricList): + self._oprot.writeMessageBegin('add_replication_metrics', TMessageType.CALL, self._seqid) + args = add_replication_metrics_args() + args.replicationMetricList = replicationMetricList + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_add_replication_metrics(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = add_replication_metrics_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + class Processor(fb303.FacebookService.Processor, Iface, TProcessor): def __init__(self, handler): @@ -10299,6 +10337,7 @@ def __init__(self, handler): self._processMap["scheduled_query_maintenance"] = Processor.process_scheduled_query_maintenance self._processMap["scheduled_query_progress"] = Processor.process_scheduled_query_progress self._processMap["get_scheduled_query"] = Processor.process_get_scheduled_query + self._processMap["add_replication_metrics"] = Processor.process_add_replication_metrics 
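The Python hunks above wire the new add_replication_metrics call end to end: the generated client sends a ReplicationMetricList (a list of ReplicationMetrics structs carrying scheduledExecutionId, policy, dumpExecutionId, metadata, and progress, per the Types.php spec earlier in this patch) and the only declared failure mode is MetaException, surfaced as result.o1. A minimal caller sketch against the generated bindings follows; the host, port, and field values are illustrative assumptions, not part of this patch.

# Sketch only: exercises the generated add_replication_metrics client stub.
# Assumes the gen-py modules are on PYTHONPATH and a metastore Thrift
# endpoint is reachable at localhost:9083 (both are assumptions).
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_metastore import ThriftHiveMetastore
from hive_metastore.ttypes import ReplicationMetrics, ReplicationMetricList

transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
transport.open()
try:
    metric = ReplicationMetrics(
        scheduledExecutionId=1,   # illustrative ids and strings
        policy='repl_policy',
        dumpExecutionId=1,
        metadata='{}',
        progress='{}')
    # Returns nothing on success; the generated recv_ stub re-raises a
    # server-side MetaException mapped to result.o1.
    client.add_replication_metrics(
        ReplicationMetricList(replicationMetricList=[metric]))
finally:
    transport.close()

Note that the generated module is still Python 2 era code (xrange, iteritems), so the sketch sticks to syntax that runs on either interpreter.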
def process(self, iprot, oprot): (name, type, seqid) = iprot.readMessageBegin() @@ -16047,6 +16086,28 @@ def process_get_scheduled_query(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_add_replication_metrics(self, seqid, iprot, oprot): + args = add_replication_metrics_args() + args.read(iprot) + iprot.readMessageEnd() + result = add_replication_metrics_result() + try: + self._handler.add_replication_metrics(args.replicationMetricList) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("add_replication_metrics", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + # HELPER FUNCTIONS AND STRUCTURES @@ -17856,10 +17917,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1064, _size1061) = iprot.readListBegin() - for _i1065 in xrange(_size1061): - _elem1066 = iprot.readString() - self.success.append(_elem1066) + (_etype1071, _size1068) = iprot.readListBegin() + for _i1072 in xrange(_size1068): + _elem1073 = iprot.readString() + self.success.append(_elem1073) iprot.readListEnd() else: iprot.skip(ftype) @@ -17882,8 +17943,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1067 in self.success: - oprot.writeString(iter1067) + for iter1074 in self.success: + oprot.writeString(iter1074) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17988,10 +18049,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1071, _size1068) = iprot.readListBegin() - for _i1072 in xrange(_size1068): - _elem1073 = iprot.readString() - self.success.append(_elem1073) + (_etype1078, _size1075) = iprot.readListBegin() + for _i1079 in xrange(_size1075): + _elem1080 = iprot.readString() + self.success.append(_elem1080) iprot.readListEnd() else: iprot.skip(ftype) @@ -18014,8 +18075,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1074 in self.success: - oprot.writeString(iter1074) + for iter1081 in self.success: + oprot.writeString(iter1081) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18785,12 +18846,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1076, _vtype1077, _size1075 ) = iprot.readMapBegin() - for _i1079 in xrange(_size1075): - _key1080 = iprot.readString() - _val1081 = Type() - _val1081.read(iprot) - self.success[_key1080] = _val1081 + (_ktype1083, _vtype1084, _size1082 ) = iprot.readMapBegin() + for _i1086 in xrange(_size1082): + _key1087 = iprot.readString() + _val1088 = Type() + _val1088.read(iprot) + self.success[_key1087] = _val1088 iprot.readMapEnd() else: iprot.skip(ftype) @@ -18813,9 +18874,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter1082,viter1083 in self.success.items(): - oprot.writeString(kiter1082) - viter1083.write(oprot) + for kiter1089,viter1090 in 
self.success.items():
+        oprot.writeString(kiter1089)
+        viter1090.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.o2 is not None:
@@ -18958,11 +19019,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1087, _size1084) = iprot.readListBegin()
-          for _i1088 in xrange(_size1084):
-            _elem1089 = FieldSchema()
-            _elem1089.read(iprot)
-            self.success.append(_elem1089)
+          (_etype1094, _size1091) = iprot.readListBegin()
+          for _i1095 in xrange(_size1091):
+            _elem1096 = FieldSchema()
+            _elem1096.read(iprot)
+            self.success.append(_elem1096)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18997,8 +19058,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1090 in self.success:
-        iter1090.write(oprot)
+      for iter1097 in self.success:
+        iter1097.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -19147,199 +19208,6 @@ class get_fields_with_environment_context_result:
     (3, TType.STRUCT, 'o3', (UnknownDBException, UnknownDBException.thrift_spec), None, ), # 3
   )

-  def __init__(self, success=None, o1=None, o2=None, o3=None,):
-    self.success = success
-    self.o1 = o1
-    self.o2 = o2
-    self.o3 = o3
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 0:
-        if ftype == TType.LIST:
-          self.success = []
-          (_etype1094, _size1091) = iprot.readListBegin()
-          for _i1095 in xrange(_size1091):
-            _elem1096 = FieldSchema()
-            _elem1096.read(iprot)
-            self.success.append(_elem1096)
-          iprot.readListEnd()
-        else:
-          iprot.skip(ftype)
-      elif fid == 1:
-        if ftype == TType.STRUCT:
-          self.o1 = MetaException()
-          self.o1.read(iprot)
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRUCT:
-          self.o2 = UnknownTableException()
-          self.o2.read(iprot)
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.STRUCT:
-          self.o3 = UnknownDBException()
-          self.o3.read(iprot)
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('get_fields_with_environment_context_result')
-    if self.success is not None:
-      oprot.writeFieldBegin('success', TType.LIST, 0)
-      oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1097 in self.success:
-        iter1097.write(oprot)
-      oprot.writeListEnd()
-      oprot.writeFieldEnd()
-    if self.o1 is not None:
-      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
-      self.o1.write(oprot)
-      oprot.writeFieldEnd()
-    if self.o2 is not None:
-      oprot.writeFieldBegin('o2', TType.STRUCT, 2)
-      self.o2.write(oprot)
-      oprot.writeFieldEnd()
-    if self.o3 is not None:
-      oprot.writeFieldBegin('o3', TType.STRUCT, 3)
-      self.o3.write(oprot)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.success)
-    value = (value * 31) ^ hash(self.o1)
-    value = (value * 31) ^ hash(self.o2)
-    value = (value * 31) ^ hash(self.o3)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class get_schema_args:
-  """
-  Attributes:
-   - db_name
-   - table_name
-  """
-
-  thrift_spec = (
-    None, # 0
-    (1, TType.STRING, 'db_name', None, None, ), # 1
-    (2, TType.STRING, 'table_name', None, None, ), # 2
-  )
-
-  def __init__(self, db_name=None, table_name=None,):
-    self.db_name = db_name
-    self.table_name = table_name
-
-  def read(self, iprot):
-    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
-      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
-      return
-    iprot.readStructBegin()
-    while True:
-      (fname, ftype, fid) = iprot.readFieldBegin()
-      if ftype == TType.STOP:
-        break
-      if fid == 1:
-        if ftype == TType.STRING:
-          self.db_name = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.table_name = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      else:
-        iprot.skip(ftype)
-      iprot.readFieldEnd()
-    iprot.readStructEnd()
-
-  def write(self, oprot):
-    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
-      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
-      return
-    oprot.writeStructBegin('get_schema_args')
-    if self.db_name is not None:
-      oprot.writeFieldBegin('db_name', TType.STRING, 1)
-      oprot.writeString(self.db_name)
-      oprot.writeFieldEnd()
-    if self.table_name is not None:
-      oprot.writeFieldBegin('table_name', TType.STRING, 2)
-      oprot.writeString(self.table_name)
-      oprot.writeFieldEnd()
-    oprot.writeFieldStop()
-    oprot.writeStructEnd()
-
-  def validate(self):
-    return
-
-
-  def __hash__(self):
-    value = 17
-    value = (value * 31) ^ hash(self.db_name)
-    value = (value * 31) ^ hash(self.table_name)
-    return value
-
-  def __repr__(self):
-    L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
-    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
-  def __eq__(self, other):
-    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
-  def __ne__(self, other):
-    return not (self == other)
-
-class get_schema_result:
-  """
-  Attributes:
-   - success
-   - o1
-   - o2
-   - o3
-  """
-
-  thrift_spec = (
-    (0, TType.LIST, 'success', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 0
-    (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
-    (2, TType.STRUCT, 'o2', (UnknownTableException, UnknownTableException.thrift_spec), None, ), # 2
-    (3, TType.STRUCT, 'o3', (UnknownDBException, UnknownDBException.thrift_spec), None, ), # 3
-  )
-
   def __init__(self, success=None, o1=None, o2=None, o3=None,):
     self.success = success
     self.o1 = o1
@@ -19393,7 +19261,7 @@ def write(self, oprot):
     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
       return
-    oprot.writeStructBegin('get_schema_result')
+    oprot.writeStructBegin('get_fields_with_environment_context_result')
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
@@ -19439,6 +19307,199 @@ def __eq__(self, other):
   def __ne__(self, other):
     return not (self == other)

+class get_schema_args:
+  """
+  Attributes:
+   - db_name
+   - table_name
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'db_name', None, None, ), # 1
+    (2, TType.STRING, 'table_name', None, None, ), # 2
+  )
+
+  def __init__(self, db_name=None, table_name=None,):
+    self.db_name = db_name
+    self.table_name = table_name
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.db_name = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.table_name = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_schema_args')
+    if self.db_name is not None:
+      oprot.writeFieldBegin('db_name', TType.STRING, 1)
+      oprot.writeString(self.db_name)
+      oprot.writeFieldEnd()
+    if self.table_name is not None:
+      oprot.writeFieldBegin('table_name', TType.STRING, 2)
+      oprot.writeString(self.table_name)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.db_name)
+    value = (value * 31) ^ hash(self.table_name)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class get_schema_result:
+  """
+  Attributes:
+   - success
+   - o1
+   - o2
+   - o3
+  """
+
+  thrift_spec = (
+    (0, TType.LIST, 'success', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 0
+    (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+    (2, TType.STRUCT, 'o2', (UnknownTableException, UnknownTableException.thrift_spec), None, ), # 2
+    (3, TType.STRUCT, 'o3', (UnknownDBException, UnknownDBException.thrift_spec), None, ), # 3
+  )
+
+  def __init__(self, success=None, o1=None, o2=None, o3=None,):
+    self.success = success
+    self.o1 = o1
+    self.o2 = o2
+    self.o3 = o3
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.LIST:
+          self.success = []
+          (_etype1108, _size1105) = iprot.readListBegin()
+          for _i1109 in xrange(_size1105):
+            _elem1110 = FieldSchema()
+            _elem1110.read(iprot)
+            self.success.append(_elem1110)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 1:
+        if ftype == TType.STRUCT:
+          self.o1 = MetaException()
+          self.o1.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRUCT:
+          self.o2 = UnknownTableException()
+          self.o2.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.STRUCT:
+          self.o3 = UnknownDBException()
+          self.o3.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_schema_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.LIST, 0)
+      oprot.writeListBegin(TType.STRUCT, len(self.success))
+      for iter1111 in self.success:
+        iter1111.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.o1 is not None:
+      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+      self.o1.write(oprot)
+      oprot.writeFieldEnd()
+    if self.o2 is not None:
+      oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+      self.o2.write(oprot)
+      oprot.writeFieldEnd()
+    if self.o3 is not None:
+      oprot.writeFieldBegin('o3', TType.STRUCT, 3)
+      self.o3.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.success)
+    value = (value * 31) ^ hash(self.o1)
+    value = (value * 31) ^ hash(self.o2)
+    value = (value * 31) ^ hash(self.o3)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class get_schema_with_environment_context_args:
   """
   Attributes:
@@ -19565,11 +19626,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1108, _size1105) = iprot.readListBegin()
-          for _i1109 in xrange(_size1105):
-            _elem1110 = FieldSchema()
-            _elem1110.read(iprot)
-            self.success.append(_elem1110)
+          (_etype1115, _size1112) = iprot.readListBegin()
+          for _i1116 in xrange(_size1112):
+            _elem1117 = FieldSchema()
+            _elem1117.read(iprot)
+            self.success.append(_elem1117)
           iprot.readListEnd()
         else:
          iprot.skip(ftype)
@@ -19604,8 +19665,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1111 in self.success:
-        iter1111.write(oprot)
+      for iter1118 in self.success:
+        iter1118.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -20058,66 +20119,66 @@ def read(self, iprot):
       elif fid == 2:
         if ftype == TType.LIST:
           self.primaryKeys = []
-          (_etype1115, _size1112) = iprot.readListBegin()
-          for _i1116 in xrange(_size1112):
-            _elem1117 = SQLPrimaryKey()
-            _elem1117.read(iprot)
-            self.primaryKeys.append(_elem1117)
+          (_etype1122, _size1119) = iprot.readListBegin()
+          for _i1123 in xrange(_size1119):
+            _elem1124 = SQLPrimaryKey()
+            _elem1124.read(iprot)
+            self.primaryKeys.append(_elem1124)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
      elif fid == 3:
         if ftype == TType.LIST:
           self.foreignKeys = []
-          (_etype1121, _size1118) = iprot.readListBegin()
-          for _i1122 in xrange(_size1118):
-            _elem1123 = SQLForeignKey()
-            _elem1123.read(iprot)
-            self.foreignKeys.append(_elem1123)
+          (_etype1128, _size1125) = iprot.readListBegin()
+          for _i1129 in xrange(_size1125):
+            _elem1130 = SQLForeignKey()
+            _elem1130.read(iprot)
+            self.foreignKeys.append(_elem1130)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 4:
         if ftype == TType.LIST:
           self.uniqueConstraints = []
-          (_etype1127, _size1124) = iprot.readListBegin()
-          for _i1128 in xrange(_size1124):
-            _elem1129 = SQLUniqueConstraint()
-            _elem1129.read(iprot)
-            self.uniqueConstraints.append(_elem1129)
+          (_etype1134, _size1131) = iprot.readListBegin()
+          for _i1135 in xrange(_size1131):
+            _elem1136 = SQLUniqueConstraint()
+            _elem1136.read(iprot)
+            self.uniqueConstraints.append(_elem1136)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 5:
         if ftype == TType.LIST:
           self.notNullConstraints = []
-          (_etype1133, _size1130) = iprot.readListBegin()
-          for _i1134 in xrange(_size1130):
-            _elem1135 = SQLNotNullConstraint()
-            _elem1135.read(iprot)
-            self.notNullConstraints.append(_elem1135)
+          (_etype1140, _size1137) = iprot.readListBegin()
+          for _i1141 in xrange(_size1137):
+            _elem1142 = SQLNotNullConstraint()
+            _elem1142.read(iprot)
+            self.notNullConstraints.append(_elem1142)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 6:
         if ftype == TType.LIST:
           self.defaultConstraints = []
-          (_etype1139, _size1136) = iprot.readListBegin()
-          for _i1140 in xrange(_size1136):
-            _elem1141 = SQLDefaultConstraint()
-            _elem1141.read(iprot)
-            self.defaultConstraints.append(_elem1141)
+          (_etype1146, _size1143) = iprot.readListBegin()
+          for _i1147 in xrange(_size1143):
+            _elem1148 = SQLDefaultConstraint()
+            _elem1148.read(iprot)
+            self.defaultConstraints.append(_elem1148)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 7:
         if ftype == TType.LIST:
           self.checkConstraints = []
-          (_etype1145, _size1142) = iprot.readListBegin()
-          for _i1146 in xrange(_size1142):
-            _elem1147 = SQLCheckConstraint()
-            _elem1147.read(iprot)
-            self.checkConstraints.append(_elem1147)
+          (_etype1152, _size1149) = iprot.readListBegin()
+          for _i1153 in xrange(_size1149):
+            _elem1154 = SQLCheckConstraint()
+            _elem1154.read(iprot)
+            self.checkConstraints.append(_elem1154)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20138,43 +20199,43 @@ def write(self, oprot):
     if self.primaryKeys is not None:
       oprot.writeFieldBegin('primaryKeys', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys))
-      for iter1148 in self.primaryKeys:
-        iter1148.write(oprot)
+      for iter1155 in self.primaryKeys:
+        iter1155.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.foreignKeys is not None:
       oprot.writeFieldBegin('foreignKeys', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys))
-      for iter1149 in self.foreignKeys:
-        iter1149.write(oprot)
+      for iter1156 in self.foreignKeys:
+        iter1156.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.uniqueConstraints is not None:
       oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4)
       oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints))
-      for iter1150 in self.uniqueConstraints:
-        iter1150.write(oprot)
+      for iter1157 in self.uniqueConstraints:
+        iter1157.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.notNullConstraints is not None:
       oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5)
       oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints))
-      for iter1151 in self.notNullConstraints:
-        iter1151.write(oprot)
+      for iter1158 in self.notNullConstraints:
+        iter1158.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.defaultConstraints is not None:
       oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6)
       oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints))
-      for iter1152 in self.defaultConstraints:
-        iter1152.write(oprot)
+      for iter1159 in self.defaultConstraints:
+        iter1159.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.checkConstraints is not None:
       oprot.writeFieldBegin('checkConstraints', TType.LIST, 7)
       oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints))
-      for iter1153 in self.checkConstraints:
-        iter1153.write(oprot)
+      for iter1160 in self.checkConstraints:
+        iter1160.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -21908,10 +21969,10 @@ def read(self, iprot):
      elif fid == 3:
         if ftype == TType.LIST:
           self.partNames = []
-          (_etype1157, _size1154) = iprot.readListBegin()
-          for _i1158 in xrange(_size1154):
-            _elem1159 = iprot.readString()
-            self.partNames.append(_elem1159)
+          (_etype1164, _size1161) = iprot.readListBegin()
+          for _i1165 in xrange(_size1161):
+            _elem1166 = iprot.readString()
+            self.partNames.append(_elem1166)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21936,8 +21997,8 @@ def write(self, oprot):
     if self.partNames is not None:
       oprot.writeFieldBegin('partNames', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.partNames))
-      for iter1160 in self.partNames:
-        oprot.writeString(iter1160)
+      for iter1167 in self.partNames:
+        oprot.writeString(iter1167)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -22282,10 +22343,10 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1164, _size1161) = iprot.readListBegin()
-          for _i1165 in xrange(_size1161):
-            _elem1166 = iprot.readString()
-            self.success.append(_elem1166)
+          (_etype1171, _size1168) = iprot.readListBegin()
+          for _i1172 in xrange(_size1168):
+            _elem1173 = iprot.readString()
+            self.success.append(_elem1173)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22308,8 +22369,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1167 in self.success:
-        oprot.writeString(iter1167)
+      for iter1174 in self.success:
+        oprot.writeString(iter1174)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -22459,10 +22520,10 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1171, _size1168) = iprot.readListBegin()
-          for _i1172 in xrange(_size1168):
-            _elem1173 = iprot.readString()
-            self.success.append(_elem1173)
+          (_etype1178, _size1175) = iprot.readListBegin()
+          for _i1179 in xrange(_size1175):
+            _elem1180 = iprot.readString()
+            self.success.append(_elem1180)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22485,8 +22546,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1174 in self.success:
-        oprot.writeString(iter1174)
+      for iter1181 in self.success:
+        oprot.writeString(iter1181)
      oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -22591,11 +22652,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1178, _size1175) = iprot.readListBegin()
-          for _i1179 in xrange(_size1175):
-            _elem1180 = Table()
-            _elem1180.read(iprot)
-            self.success.append(_elem1180)
+          (_etype1185, _size1182) = iprot.readListBegin()
+          for _i1186 in xrange(_size1182):
+            _elem1187 = Table()
+            _elem1187.read(iprot)
+            self.success.append(_elem1187)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22618,8 +22679,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1181 in self.success:
-        iter1181.write(oprot)
+      for iter1188 in self.success:
+        iter1188.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -22743,10 +22804,10 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1185, _size1182) = iprot.readListBegin()
-          for _i1186 in xrange(_size1182):
-            _elem1187 = iprot.readString()
-            self.success.append(_elem1187)
+          (_etype1192, _size1189) = iprot.readListBegin()
+          for _i1193 in xrange(_size1189):
+            _elem1194 = iprot.readString()
+            self.success.append(_elem1194)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22769,8 +22830,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1188 in self.success:
-        oprot.writeString(iter1188)
+      for iter1195 in self.success:
+        oprot.writeString(iter1195)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -22843,10 +22904,10 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.tbl_types = []
-          (_etype1192, _size1189) = iprot.readListBegin()
-          for _i1193 in xrange(_size1189):
-            _elem1194 = iprot.readString()
-            self.tbl_types.append(_elem1194)
+          (_etype1199, _size1196) = iprot.readListBegin()
+          for _i1200 in xrange(_size1196):
+            _elem1201 = iprot.readString()
+            self.tbl_types.append(_elem1201)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22871,8 +22932,8 @@ def write(self, oprot):
     if self.tbl_types is not None:
       oprot.writeFieldBegin('tbl_types', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.tbl_types))
-      for iter1195 in self.tbl_types:
-        oprot.writeString(iter1195)
+      for iter1202 in self.tbl_types:
+        oprot.writeString(iter1202)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -22928,11 +22989,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1199, _size1196) = iprot.readListBegin()
-          for _i1200 in xrange(_size1196):
-            _elem1201 = TableMeta()
-            _elem1201.read(iprot)
-            self.success.append(_elem1201)
+          (_etype1206, _size1203) = iprot.readListBegin()
+          for _i1207 in xrange(_size1203):
+            _elem1208 = TableMeta()
+            _elem1208.read(iprot)
+            self.success.append(_elem1208)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22955,8 +23016,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1202 in self.success:
-        iter1202.write(oprot)
+      for iter1209 in self.success:
+        iter1209.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -23080,10 +23141,10 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1206, _size1203) = iprot.readListBegin()
-          for _i1207 in xrange(_size1203):
-            _elem1208 = iprot.readString()
-            self.success.append(_elem1208)
+          (_etype1213, _size1210) = iprot.readListBegin()
+          for _i1214 in xrange(_size1210):
+            _elem1215 = iprot.readString()
+            self.success.append(_elem1215)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23106,8 +23167,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1209 in self.success:
-        oprot.writeString(iter1209)
+      for iter1216 in self.success:
+        oprot.writeString(iter1216)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -23343,10 +23404,10 @@ def read(self, iprot):
       elif fid == 2:
         if ftype == TType.LIST:
           self.tbl_names = []
-          (_etype1213, _size1210) = iprot.readListBegin()
-          for _i1214 in xrange(_size1210):
-            _elem1215 = iprot.readString()
-            self.tbl_names.append(_elem1215)
+          (_etype1220, _size1217) = iprot.readListBegin()
+          for _i1221 in xrange(_size1217):
+            _elem1222 = iprot.readString()
+            self.tbl_names.append(_elem1222)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23367,8 +23428,8 @@ def write(self, oprot):
     if self.tbl_names is not None:
       oprot.writeFieldBegin('tbl_names', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.tbl_names))
-      for iter1216 in self.tbl_names:
-        oprot.writeString(iter1216)
+      for iter1223 in self.tbl_names:
+        oprot.writeString(iter1223)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -23420,11 +23481,11 @@ def read(self, iprot):
       if fid == 0:
        if ftype == TType.LIST:
          self.success = []
-          (_etype1220, _size1217) = iprot.readListBegin()
-          for _i1221 in xrange(_size1217):
-            _elem1222 = Table()
-            _elem1222.read(iprot)
-            self.success.append(_elem1222)
+          (_etype1227, _size1224) = iprot.readListBegin()
+          for _i1228 in xrange(_size1224):
+            _elem1229 = Table()
+            _elem1229.read(iprot)
+            self.success.append(_elem1229)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23441,8 +23502,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1223 in self.success:
-        iter1223.write(oprot)
+      for iter1230 in self.success:
+        iter1230.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -23562,11 +23623,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1227, _size1224) = iprot.readListBegin()
-          for _i1228 in xrange(_size1224):
-            _elem1229 = ExtendedTableInfo()
-            _elem1229.read(iprot)
-            self.success.append(_elem1229)
+          (_etype1234, _size1231) = iprot.readListBegin()
+          for _i1235 in xrange(_size1231):
+            _elem1236 = ExtendedTableInfo()
+            _elem1236.read(iprot)
+            self.success.append(_elem1236)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23589,8 +23650,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1230 in self.success:
-        iter1230.write(oprot)
+      for iter1237 in self.success:
+        iter1237.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -24463,10 +24524,10 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1234, _size1231) = iprot.readListBegin()
-          for _i1235 in xrange(_size1231):
-            _elem1236 = iprot.readString()
-            self.success.append(_elem1236)
+          (_etype1241, _size1238) = iprot.readListBegin()
+          for _i1242 in xrange(_size1238):
+            _elem1243 = iprot.readString()
+            self.success.append(_elem1243)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24501,8 +24562,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1237 in self.success:
-        oprot.writeString(iter1237)
+      for iter1244 in self.success:
+        oprot.writeString(iter1244)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -25631,11 +25692,11 @@ def read(self, iprot):
       if fid == 1:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype1241, _size1238) = iprot.readListBegin()
-          for _i1242 in xrange(_size1238):
-            _elem1243 = Partition()
-            _elem1243.read(iprot)
-            self.new_parts.append(_elem1243)
+          (_etype1248, _size1245) = iprot.readListBegin()
+          for _i1249 in xrange(_size1245):
+            _elem1250 = Partition()
+            _elem1250.read(iprot)
+            self.new_parts.append(_elem1250)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -25652,8 +25713,8 @@ def write(self, oprot):
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1244 in self.new_parts:
-        iter1244.write(oprot)
+      for iter1251 in self.new_parts:
+        iter1251.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -25811,11 +25872,11 @@ def read(self, iprot):
       if fid == 1:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype1248, _size1245) = iprot.readListBegin()
-          for _i1249 in xrange(_size1245):
-            _elem1250 = PartitionSpec()
-            _elem1250.read(iprot)
-            self.new_parts.append(_elem1250)
+          (_etype1255, _size1252) = iprot.readListBegin()
+          for _i1256 in xrange(_size1252):
+            _elem1257 = PartitionSpec()
+            _elem1257.read(iprot)
+            self.new_parts.append(_elem1257)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -25832,8 +25893,8 @@ def write(self, oprot):
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1251 in self.new_parts:
-        iter1251.write(oprot)
+      for iter1258 in self.new_parts:
+        iter1258.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -26007,10 +26068,10 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1255, _size1252) = iprot.readListBegin()
-          for _i1256 in xrange(_size1252):
-            _elem1257 = iprot.readString()
-            self.part_vals.append(_elem1257)
+          (_etype1262, _size1259) = iprot.readListBegin()
+          for _i1263 in xrange(_size1259):
+            _elem1264 = iprot.readString()
+            self.part_vals.append(_elem1264)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26035,8 +26096,8 @@ def write(self, oprot):
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1258 in self.part_vals:
-        oprot.writeString(iter1258)
+      for iter1265 in self.part_vals:
+        oprot.writeString(iter1265)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -26389,10 +26450,10 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1262, _size1259) = iprot.readListBegin()
-          for _i1263 in xrange(_size1259):
-            _elem1264 = iprot.readString()
-            self.part_vals.append(_elem1264)
+          (_etype1269, _size1266) = iprot.readListBegin()
+          for _i1270 in xrange(_size1266):
+            _elem1271 = iprot.readString()
+            self.part_vals.append(_elem1271)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26423,8 +26484,8 @@ def write(self, oprot):
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1265 in self.part_vals:
-        oprot.writeString(iter1265)
+      for iter1272 in self.part_vals:
+        oprot.writeString(iter1272)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.environment_context is not None:
@@ -27019,10 +27080,10 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1269, _size1266) = iprot.readListBegin()
-          for _i1270 in xrange(_size1266):
-            _elem1271 = iprot.readString()
-            self.part_vals.append(_elem1271)
+          (_etype1276, _size1273) = iprot.readListBegin()
+          for _i1277 in xrange(_size1273):
+            _elem1278 = iprot.readString()
+            self.part_vals.append(_elem1278)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27052,8 +27113,8 @@ def write(self, oprot):
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1272 in self.part_vals:
-        oprot.writeString(iter1272)
+      for iter1279 in self.part_vals:
+        oprot.writeString(iter1279)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.deleteData is not None:
@@ -27226,10 +27287,10 @@ def read(self, iprot):
       elif fid == 3:
        if ftype == TType.LIST:
          self.part_vals = []
-          (_etype1276, _size1273) = iprot.readListBegin()
-          for _i1277 in xrange(_size1273):
-            _elem1278 = iprot.readString()
-            self.part_vals.append(_elem1278)
+          (_etype1283, _size1280) = iprot.readListBegin()
+          for _i1284 in xrange(_size1280):
+            _elem1285 = iprot.readString()
+            self.part_vals.append(_elem1285)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27265,8 +27326,8 @@ def write(self, oprot):
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1279 in self.part_vals:
-        oprot.writeString(iter1279)
+      for iter1286 in self.part_vals:
+        oprot.writeString(iter1286)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.deleteData is not None:
@@ -28003,10 +28064,10 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1283, _size1280) = iprot.readListBegin()
-          for _i1284 in xrange(_size1280):
-            _elem1285 = iprot.readString()
-            self.part_vals.append(_elem1285)
+          (_etype1290, _size1287) = iprot.readListBegin()
+          for _i1291 in xrange(_size1287):
+            _elem1292 = iprot.readString()
+            self.part_vals.append(_elem1292)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28031,8 +28092,8 @@ def write(self, oprot):
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1286 in self.part_vals:
-        oprot.writeString(iter1286)
+      for iter1293 in self.part_vals:
+        oprot.writeString(iter1293)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -28191,11 +28252,11 @@ def read(self, iprot):
       if fid == 1:
         if ftype == TType.MAP:
           self.partitionSpecs = {}
-          (_ktype1288, _vtype1289, _size1287 ) = iprot.readMapBegin()
-          for _i1291 in xrange(_size1287):
-            _key1292 = iprot.readString()
-            _val1293 = iprot.readString()
-            self.partitionSpecs[_key1292] = _val1293
+          (_ktype1295, _vtype1296, _size1294 ) = iprot.readMapBegin()
+          for _i1298 in xrange(_size1294):
+            _key1299 = iprot.readString()
+            _val1300 = iprot.readString()
+            self.partitionSpecs[_key1299] = _val1300
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -28232,9 +28293,9 @@ def write(self, oprot):
     if self.partitionSpecs is not None:
       oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-      for kiter1294,viter1295 in self.partitionSpecs.items():
-        oprot.writeString(kiter1294)
-        oprot.writeString(viter1295)
+      for kiter1301,viter1302 in self.partitionSpecs.items():
+        oprot.writeString(kiter1301)
+        oprot.writeString(viter1302)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.source_db is not None:
@@ -28439,11 +28500,11 @@ def read(self, iprot):
       if fid == 1:
         if ftype == TType.MAP:
           self.partitionSpecs = {}
-          (_ktype1297, _vtype1298, _size1296 ) = iprot.readMapBegin()
-          for _i1300 in xrange(_size1296):
-            _key1301 = iprot.readString()
-            _val1302 = iprot.readString()
-            self.partitionSpecs[_key1301] = _val1302
+          (_ktype1304, _vtype1305, _size1303 ) = iprot.readMapBegin()
+          for _i1307 in xrange(_size1303):
+            _key1308 = iprot.readString()
+            _val1309 = iprot.readString()
+            self.partitionSpecs[_key1308] = _val1309
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -28480,9 +28541,9 @@ def write(self, oprot):
     if self.partitionSpecs is not None:
       oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-      for kiter1303,viter1304 in self.partitionSpecs.items():
-        oprot.writeString(kiter1303)
-        oprot.writeString(viter1304)
+      for kiter1310,viter1311 in self.partitionSpecs.items():
+        oprot.writeString(kiter1310)
+        oprot.writeString(viter1311)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.source_db is not None:
@@ -28565,11 +28626,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1308, _size1305) = iprot.readListBegin()
-          for _i1309 in xrange(_size1305):
-            _elem1310 = Partition()
-            _elem1310.read(iprot)
-            self.success.append(_elem1310)
+          (_etype1315, _size1312) = iprot.readListBegin()
+          for _i1316 in xrange(_size1312):
+            _elem1317 = Partition()
+            _elem1317.read(iprot)
+            self.success.append(_elem1317)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28610,8 +28671,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1311 in self.success:
-        iter1311.write(oprot)
+      for iter1318 in self.success:
+        iter1318.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -28705,10 +28766,10 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1315, _size1312) = iprot.readListBegin()
-          for _i1316 in xrange(_size1312):
-            _elem1317 = iprot.readString()
-            self.part_vals.append(_elem1317)
+          (_etype1322, _size1319) = iprot.readListBegin()
+          for _i1323 in xrange(_size1319):
+            _elem1324 = iprot.readString()
+            self.part_vals.append(_elem1324)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28720,10 +28781,10 @@ def read(self, iprot):
       elif fid == 5:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1321, _size1318) = iprot.readListBegin()
-          for _i1322 in xrange(_size1318):
-            _elem1323 = iprot.readString()
-            self.group_names.append(_elem1323)
+          (_etype1328, _size1325) = iprot.readListBegin()
+          for _i1329 in xrange(_size1325):
+            _elem1330 = iprot.readString()
+            self.group_names.append(_elem1330)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28748,8 +28809,8 @@ def write(self, oprot):
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1324 in self.part_vals:
-        oprot.writeString(iter1324)
+      for iter1331 in self.part_vals:
+        oprot.writeString(iter1331)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.user_name is not None:
@@ -28759,8 +28820,8 @@ def write(self, oprot):
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1325 in self.group_names:
-        oprot.writeString(iter1325)
+      for iter1332 in self.group_names:
+        oprot.writeString(iter1332)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -29189,11 +29250,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1329, _size1326) = iprot.readListBegin()
-          for _i1330 in xrange(_size1326):
-            _elem1331 = Partition()
-            _elem1331.read(iprot)
-            self.success.append(_elem1331)
+          (_etype1336, _size1333) = iprot.readListBegin()
+          for _i1337 in xrange(_size1333):
+            _elem1338 = Partition()
+            _elem1338.read(iprot)
+            self.success.append(_elem1338)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29222,8 +29283,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1332 in self.success:
-        iter1332.write(oprot)
+      for iter1339 in self.success:
+        iter1339.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -29317,10 +29378,10 @@ def read(self, iprot):
      elif fid == 5:
         if ftype == TType.LIST:
          self.group_names = []
-          (_etype1336, _size1333) = iprot.readListBegin()
-          for _i1337 in xrange(_size1333):
-            _elem1338 = iprot.readString()
-            self.group_names.append(_elem1338)
+          (_etype1343, _size1340) = iprot.readListBegin()
+          for _i1344 in xrange(_size1340):
+            _elem1345 = iprot.readString()
+            self.group_names.append(_elem1345)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29353,8 +29414,8 @@ def write(self, oprot):
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1339 in self.group_names:
-        oprot.writeString(iter1339)
+      for iter1346 in self.group_names:
+        oprot.writeString(iter1346)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -29415,11 +29476,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1343, _size1340) = iprot.readListBegin()
-          for _i1344 in xrange(_size1340):
-            _elem1345 = Partition()
-            _elem1345.read(iprot)
-            self.success.append(_elem1345)
+          (_etype1350, _size1347) = iprot.readListBegin()
+          for _i1351 in xrange(_size1347):
+            _elem1352 = Partition()
+            _elem1352.read(iprot)
+            self.success.append(_elem1352)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29448,8 +29509,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1346 in self.success:
-        iter1346.write(oprot)
+      for iter1353 in self.success:
+        iter1353.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -29607,11 +29668,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1350, _size1347) = iprot.readListBegin()
-          for _i1351 in xrange(_size1347):
-            _elem1352 = PartitionSpec()
-            _elem1352.read(iprot)
-            self.success.append(_elem1352)
+          (_etype1357, _size1354) = iprot.readListBegin()
+          for _i1358 in xrange(_size1354):
+            _elem1359 = PartitionSpec()
+            _elem1359.read(iprot)
+            self.success.append(_elem1359)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29640,8 +29701,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1353 in self.success:
-        iter1353.write(oprot)
+      for iter1360 in self.success:
+        iter1360.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -29799,10 +29860,10 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1357, _size1354) = iprot.readListBegin()
-          for _i1358 in xrange(_size1354):
-            _elem1359 = iprot.readString()
-            self.success.append(_elem1359)
+          (_etype1364, _size1361) = iprot.readListBegin()
+          for _i1365 in xrange(_size1361):
+            _elem1366 = iprot.readString()
+            self.success.append(_elem1366)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29831,8 +29892,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1360 in self.success:
-        oprot.writeString(iter1360)
+      for iter1367 in self.success:
+        oprot.writeString(iter1367)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -30072,10 +30133,10 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1364, _size1361) = iprot.readListBegin()
-          for _i1365 in xrange(_size1361):
-            _elem1366 = iprot.readString()
-            self.part_vals.append(_elem1366)
+          (_etype1371, _size1368) = iprot.readListBegin()
+          for _i1372 in xrange(_size1368):
+            _elem1373 = iprot.readString()
+            self.part_vals.append(_elem1373)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30105,8 +30166,8 @@ def write(self, oprot):
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1367 in self.part_vals:
-        oprot.writeString(iter1367)
+      for iter1374 in self.part_vals:
+        oprot.writeString(iter1374)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -30170,11 +30231,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1371, _size1368) = iprot.readListBegin()
-          for _i1372 in xrange(_size1368):
-            _elem1373 = Partition()
-            _elem1373.read(iprot)
-            self.success.append(_elem1373)
+          (_etype1378, _size1375) = iprot.readListBegin()
+          for _i1379 in xrange(_size1375):
+            _elem1380 = Partition()
+            _elem1380.read(iprot)
+            self.success.append(_elem1380)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30203,8 +30264,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1374 in self.success:
-        iter1374.write(oprot)
+      for iter1381 in self.success:
+        iter1381.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -30291,10 +30352,10 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1378, _size1375) = iprot.readListBegin()
-          for _i1379 in xrange(_size1375):
-            _elem1380 = iprot.readString()
-            self.part_vals.append(_elem1380)
+          (_etype1385, _size1382) = iprot.readListBegin()
+          for _i1386 in xrange(_size1382):
+            _elem1387 = iprot.readString()
+            self.part_vals.append(_elem1387)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30311,10 +30372,10 @@ def read(self, iprot):
       elif fid == 6:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1384, _size1381) = iprot.readListBegin()
-          for _i1385 in xrange(_size1381):
-            _elem1386 = iprot.readString()
-            self.group_names.append(_elem1386)
+          (_etype1391, _size1388) = iprot.readListBegin()
+          for _i1392 in xrange(_size1388):
+            _elem1393 = iprot.readString()
+            self.group_names.append(_elem1393)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30339,8 +30400,8 @@ def write(self, oprot):
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1387 in self.part_vals:
-        oprot.writeString(iter1387)
+      for iter1394 in self.part_vals:
+        oprot.writeString(iter1394)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -30354,8 +30415,8 @@ def write(self, oprot):
     if self.group_names is not None:
      oprot.writeFieldBegin('group_names', TType.LIST, 6)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1388 in self.group_names:
-        oprot.writeString(iter1388)
+      for iter1395 in self.group_names:
+        oprot.writeString(iter1395)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -30417,11 +30478,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1392, _size1389) = iprot.readListBegin()
-          for _i1393 in xrange(_size1389):
-            _elem1394 = Partition()
-            _elem1394.read(iprot)
-            self.success.append(_elem1394)
+          (_etype1399, _size1396) = iprot.readListBegin()
+          for _i1400 in xrange(_size1396):
+            _elem1401 = Partition()
+            _elem1401.read(iprot)
+            self.success.append(_elem1401)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30450,8 +30511,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1395 in self.success:
-        iter1395.write(oprot)
+      for iter1402 in self.success:
+        iter1402.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -30532,10 +30593,10 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1399, _size1396) = iprot.readListBegin()
-          for _i1400 in xrange(_size1396):
-            _elem1401 = iprot.readString()
-            self.part_vals.append(_elem1401)
+          (_etype1406, _size1403) = iprot.readListBegin()
+          for _i1407 in xrange(_size1403):
+            _elem1408 = iprot.readString()
+            self.part_vals.append(_elem1408)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30565,8 +30626,8 @@ def write(self, oprot):
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1402 in self.part_vals:
-        oprot.writeString(iter1402)
+      for iter1409 in self.part_vals:
+        oprot.writeString(iter1409)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -30630,10 +30691,10 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1406, _size1403) = iprot.readListBegin()
-          for _i1407 in xrange(_size1403):
-            _elem1408 = iprot.readString()
-            self.success.append(_elem1408)
+          (_etype1413, _size1410) = iprot.readListBegin()
+          for _i1414 in xrange(_size1410):
+            _elem1415 = iprot.readString()
+            self.success.append(_elem1415)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30662,8 +30723,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1409 in self.success:
-        oprot.writeString(iter1409)
+      for iter1416 in self.success:
+        oprot.writeString(iter1416)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -30796,10 +30857,10 @@ def read(self, iprot):
       if fid == 0:
        if ftype == TType.LIST:
          self.success = []
-          (_etype1413, _size1410) = iprot.readListBegin()
-          for _i1414 in xrange(_size1410):
-            _elem1415 = iprot.readString()
-            self.success.append(_elem1415)
+          (_etype1420, _size1417) = iprot.readListBegin()
+          for _i1421 in xrange(_size1417):
+            _elem1422 = iprot.readString()
+            self.success.append(_elem1422)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30828,8 +30889,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1416 in self.success:
-        oprot.writeString(iter1416)
+      for iter1423 in self.success:
+        oprot.writeString(iter1423)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -31000,11 +31061,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1420, _size1417) = iprot.readListBegin()
-          for _i1421 in xrange(_size1417):
-            _elem1422 = Partition()
-            _elem1422.read(iprot)
-            self.success.append(_elem1422)
+          (_etype1427, _size1424) = iprot.readListBegin()
+          for _i1428 in xrange(_size1424):
+            _elem1429 = Partition()
+            _elem1429.read(iprot)
+            self.success.append(_elem1429)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -31033,8 +31094,8 @@ def write(self, oprot):
     if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1423 in self.success:
-        iter1423.write(oprot)
+      for iter1430 in self.success:
+        iter1430.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -31205,11 +31266,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1427, _size1424) = iprot.readListBegin()
-          for _i1428 in xrange(_size1424):
-            _elem1429 = PartitionSpec()
-            _elem1429.read(iprot)
-            self.success.append(_elem1429)
+          (_etype1434, _size1431) = iprot.readListBegin()
+          for _i1435 in xrange(_size1431):
+            _elem1436 = PartitionSpec()
+            _elem1436.read(iprot)
+            self.success.append(_elem1436)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -31238,8 +31299,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1430 in self.success:
-        iter1430.write(oprot)
+      for iter1437 in self.success:
+        iter1437.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -31818,10 +31879,10 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.names = []
-          (_etype1434, _size1431) = iprot.readListBegin()
-          for _i1435 in xrange(_size1431):
-            _elem1436 = iprot.readString()
-            self.names.append(_elem1436)
+          (_etype1441, _size1438) = iprot.readListBegin()
+          for _i1442 in xrange(_size1438):
+            _elem1443 = iprot.readString()
+            self.names.append(_elem1443)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -31846,8 +31907,8 @@ def write(self, oprot):
     if self.names is not None:
       oprot.writeFieldBegin('names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.names))
-      for iter1437 in self.names:
-        oprot.writeString(iter1437)
+      for iter1444 in self.names:
+        oprot.writeString(iter1444)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -31906,11 +31967,11 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1441, _size1438) = iprot.readListBegin()
-          for _i1442 in xrange(_size1438):
-            _elem1443 = Partition()
-            _elem1443.read(iprot)
-            self.success.append(_elem1443)
+          (_etype1448, _size1445) = iprot.readListBegin()
+          for _i1449 in xrange(_size1445):
+            _elem1450 = Partition()
+            _elem1450.read(iprot)
+            self.success.append(_elem1450)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -31939,8 +32000,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1444 in self.success:
-        iter1444.write(oprot)
+      for iter1451 in self.success:
+        iter1451.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -32349,11 +32410,11 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype1448, _size1445) = iprot.readListBegin()
-          for _i1449 in xrange(_size1445):
-            _elem1450 = Partition()
-            _elem1450.read(iprot)
-            self.new_parts.append(_elem1450)
+          (_etype1455, _size1452) = iprot.readListBegin()
+          for _i1456 in xrange(_size1452):
+            _elem1457 = Partition()
+            _elem1457.read(iprot)
+            self.new_parts.append(_elem1457)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -32378,8 +32439,8 @@ def write(self, oprot):
     if self.new_parts is not None:
      oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1451 in self.new_parts:
-        iter1451.write(oprot)
+      for iter1458 in self.new_parts:
+        iter1458.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -32532,11 +32593,11 @@ def read(self, iprot):
       elif fid == 3:
        if ftype == TType.LIST:
          self.new_parts = []
-          (_etype1455, _size1452) = iprot.readListBegin()
-          for _i1456 in xrange(_size1452):
-            _elem1457 = Partition()
-            _elem1457.read(iprot)
-            self.new_parts.append(_elem1457)
+          (_etype1462, _size1459) = iprot.readListBegin()
+          for _i1463 in xrange(_size1459):
+            _elem1464 = Partition()
+            _elem1464.read(iprot)
+            self.new_parts.append(_elem1464)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -32567,8 +32628,8 @@ def write(self, oprot):
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1458 in self.new_parts:
-        iter1458.write(oprot)
+      for iter1465 in self.new_parts:
+        iter1465.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.environment_context is not None:
@@ -33071,10 +33132,10 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1462, _size1459) = iprot.readListBegin()
-          for _i1463 in xrange(_size1459):
-            _elem1464 = iprot.readString()
-            self.part_vals.append(_elem1464)
+          (_etype1469, _size1466) = iprot.readListBegin()
+          for _i1470 in xrange(_size1466):
+            _elem1471 = iprot.readString()
+            self.part_vals.append(_elem1471)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -33105,8 +33166,8 @@ def write(self, oprot):
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1465 in self.part_vals:
-        oprot.writeString(iter1465)
+      for iter1472 in self.part_vals:
+        oprot.writeString(iter1472)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.new_part is not None:
@@ -33407,10 +33468,10 @@ def read(self, iprot):
      if fid == 1:
        if ftype == TType.LIST:
          self.part_vals = []
-          (_etype1469, _size1466) = iprot.readListBegin()
-          for _i1470 in xrange(_size1466):
-            _elem1471 = iprot.readString()
-            self.part_vals.append(_elem1471)
+          (_etype1476, _size1473) = iprot.readListBegin()
+          for _i1477 in xrange(_size1473):
+            _elem1478 = iprot.readString()
+            self.part_vals.append(_elem1478)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -33432,8 +33493,8 @@ def write(self, oprot):
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1472 in self.part_vals:
-        oprot.writeString(iter1472)
+      for iter1479 in self.part_vals:
+        oprot.writeString(iter1479)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.throw_exception is not None:
@@ -33791,10 +33852,10 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1476, _size1473) = iprot.readListBegin()
-          for _i1477 in xrange(_size1473):
-            _elem1478 = iprot.readString()
-            self.success.append(_elem1478)
+          (_etype1483, _size1480) = iprot.readListBegin()
+          for _i1484 in xrange(_size1480):
+            _elem1485 = iprot.readString()
+            self.success.append(_elem1485)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -33817,8 +33878,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1479 in self.success:
-        oprot.writeString(iter1479)
+      for iter1486 in self.success:
+        oprot.writeString(iter1486)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -33942,11 +34003,11 @@ def read(self, iprot):
       if fid == 0:
        if ftype == TType.MAP:
          self.success = {}
-          (_ktype1481, _vtype1482, _size1480 ) = iprot.readMapBegin()
-          for _i1484 in xrange(_size1480):
-            _key1485 = iprot.readString()
-            _val1486 = iprot.readString()
-            self.success[_key1485] = _val1486
+          (_ktype1488, _vtype1489, _size1487 ) = iprot.readMapBegin()
+          for _i1491 in xrange(_size1487):
+            _key1492 = iprot.readString()
+            _val1493 = iprot.readString()
+            self.success[_key1492] = _val1493
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -33969,9 +34030,9 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
-      for kiter1487,viter1488 in self.success.items():
-        oprot.writeString(kiter1487)
-        oprot.writeString(viter1488)
+      for kiter1494,viter1495 in self.success.items():
+        oprot.writeString(kiter1494)
+        oprot.writeString(viter1495)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -34047,11 +34108,11 @@ def read(self, iprot):
      elif fid == 3:
        if ftype == TType.MAP:
          self.part_vals = {}
-          (_ktype1490, _vtype1491, _size1489 ) = iprot.readMapBegin()
-          for _i1493 in xrange(_size1489):
-            _key1494 = iprot.readString()
-            _val1495 = iprot.readString()
-            self.part_vals[_key1494] = _val1495
+          (_ktype1497, _vtype1498, _size1496 ) = iprot.readMapBegin()
+          for _i1500 in xrange(_size1496):
+            _key1501 = iprot.readString()
+            _val1502 = iprot.readString()
+            self.part_vals[_key1501] = _val1502
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -34081,9 +34142,9 @@ def write(self, oprot):
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter1496,viter1497 in self.part_vals.items():
-        oprot.writeString(kiter1496)
-        oprot.writeString(viter1497)
+      for kiter1503,viter1504 in self.part_vals.items():
+        oprot.writeString(kiter1503)
+        oprot.writeString(viter1504)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -34297,11 +34358,11 @@ def read(self, iprot):
      elif fid == 3:
        if ftype == TType.MAP:
          self.part_vals = {}
-          (_ktype1499, _vtype1500, _size1498 ) = iprot.readMapBegin()
-          for _i1502 in xrange(_size1498):
-            _key1503 = iprot.readString()
-            _val1504 = iprot.readString()
-            self.part_vals[_key1503] = _val1504
+          (_ktype1506, _vtype1507, _size1505 ) = iprot.readMapBegin()
+          for _i1509 in xrange(_size1505):
+            _key1510 = iprot.readString()
+            _val1511 = iprot.readString()
+            self.part_vals[_key1510] = _val1511
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -34331,9 +34392,9 @@ def write(self, oprot):
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter1505,viter1506 in self.part_vals.items():
-        oprot.writeString(kiter1505)
-        oprot.writeString(viter1506)
+      for kiter1512,viter1513 in self.part_vals.items():
+        oprot.writeString(kiter1512)
+        oprot.writeString(viter1513)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -38385,10 +38446,10 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1510, _size1507) = iprot.readListBegin()
-          for _i1511 in xrange(_size1507):
-            _elem1512 = iprot.readString()
-            self.success.append(_elem1512)
+          (_etype1517, _size1514) = iprot.readListBegin()
+          for _i1518 in xrange(_size1514):
+            _elem1519 = iprot.readString()
+            self.success.append(_elem1519)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -38411,8 +38472,8 @@ def write(self, oprot):
     if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1513 in self.success:
-        oprot.writeString(iter1513)
+      for iter1520 in self.success:
+        oprot.writeString(iter1520)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -39100,10 +39161,10 @@ def read(self, iprot):
       if fid == 0:
        if ftype == TType.LIST:
          self.success = []
-          (_etype1517, _size1514) = iprot.readListBegin()
-          for _i1518 in xrange(_size1514):
-            _elem1519 = iprot.readString()
-            self.success.append(_elem1519)
+          (_etype1524, _size1521) = iprot.readListBegin()
+          for _i1525 in xrange(_size1521):
+            _elem1526 = iprot.readString()
+            self.success.append(_elem1526)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -39126,8 +39187,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1520 in self.success:
-        oprot.writeString(iter1520)
+      for iter1527 in self.success:
+        oprot.writeString(iter1527)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -39641,11 +39702,11 @@ def read(self, iprot):
       if fid == 0:
        if ftype == TType.LIST:
          self.success = []
-          (_etype1524, _size1521) = iprot.readListBegin()
-          for _i1525 in xrange(_size1521):
-            _elem1526 = Role()
-            _elem1526.read(iprot)
-            self.success.append(_elem1526)
+          (_etype1531, _size1528) = iprot.readListBegin()
+          for _i1532 in xrange(_size1528):
+            _elem1533 = Role()
+            _elem1533.read(iprot)
+            self.success.append(_elem1533)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -39668,8 +39729,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1527 in self.success:
-        iter1527.write(oprot)
+      for iter1534 in self.success:
+        iter1534.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -40178,10 +40239,10 @@ def read(self, iprot):
      elif fid == 3:
        if ftype == TType.LIST:
          self.group_names = []
-          (_etype1531, _size1528) = iprot.readListBegin()
-          for _i1532 in xrange(_size1528):
-            _elem1533 = iprot.readString()
-            self.group_names.append(_elem1533)
+          (_etype1538, _size1535) = iprot.readListBegin()
+          for _i1539 in xrange(_size1535):
+            _elem1540 = iprot.readString()
+            self.group_names.append(_elem1540)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -40206,8 +40267,8 @@ def write(self, oprot):
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1534 in self.group_names:
-        oprot.writeString(iter1534)
+      for iter1541 in self.group_names:
+        oprot.writeString(iter1541)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -40434,11 +40495,11 @@ def read(self, iprot):
       if fid == 0:
        if ftype == TType.LIST:
          self.success = []
-          (_etype1538, _size1535) = iprot.readListBegin()
-          for _i1539 in xrange(_size1535):
-            _elem1540 = HiveObjectPrivilege()
-            _elem1540.read(iprot)
-            self.success.append(_elem1540)
+          (_etype1545, _size1542) = iprot.readListBegin()
+          for _i1546 in xrange(_size1542):
+            _elem1547 = HiveObjectPrivilege()
+            _elem1547.read(iprot)
+            self.success.append(_elem1547)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -40461,8 +40522,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1541 in self.success:
-        iter1541.write(oprot)
+      for iter1548 in self.success:
+        iter1548.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -41132,10 +41193,10 @@ def read(self, iprot):
      elif fid == 2:
        if ftype == TType.LIST:
          self.group_names = []
-          (_etype1545, _size1542) = iprot.readListBegin()
-          for _i1546 in xrange(_size1542):
-            _elem1547 = iprot.readString()
-            self.group_names.append(_elem1547)
+          (_etype1552, _size1549) = iprot.readListBegin()
+          for _i1553 in xrange(_size1549):
+            _elem1554 = iprot.readString()
+            self.group_names.append(_elem1554)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -41156,8 +41217,8 @@ def write(self, oprot):
     if self.group_names is not None:
      oprot.writeFieldBegin('group_names', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1548 in self.group_names:
-        oprot.writeString(iter1548)
+      for iter1555 in self.group_names:
+        oprot.writeString(iter1555)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -41212,10 +41273,10 @@ def read(self, iprot):
       if fid == 0:
        if ftype == TType.LIST:
          self.success = []
-          (_etype1552, _size1549) = iprot.readListBegin()
-          for _i1553 in xrange(_size1549):
-            _elem1554 = iprot.readString()
-            self.success.append(_elem1554)
+          (_etype1559, _size1556) = iprot.readListBegin()
+          for _i1560 in xrange(_size1556):
+            _elem1561 = iprot.readString()
+            self.success.append(_elem1561)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -41238,8 +41299,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1555 in self.success:
-        oprot.writeString(iter1555)
+      for iter1562 in self.success:
+        oprot.writeString(iter1562)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -42171,10 +42232,10 @@ def read(self, iprot):
       if fid == 0:
        if ftype == TType.LIST:
          self.success = []
-          (_etype1559, _size1556) = iprot.readListBegin()
-          for _i1560 in xrange(_size1556):
-            _elem1561 = iprot.readString()
-            self.success.append(_elem1561)
+          (_etype1566, _size1563) = iprot.readListBegin()
+          for _i1567 in xrange(_size1563):
+            _elem1568 = iprot.readString()
+            self.success.append(_elem1568)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -42191,8 +42252,8 @@ def write(self, oprot):
     if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1562 in self.success:
-        oprot.writeString(iter1562)
+      for iter1569 in self.success:
+        oprot.writeString(iter1569)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -42719,10 +42780,10 @@ def read(self, iprot):
       if fid == 0:
        if ftype == TType.LIST:
          self.success = []
-          (_etype1566, _size1563) = iprot.readListBegin()
-          for _i1567 in xrange(_size1563):
-            _elem1568 = iprot.readString()
-            self.success.append(_elem1568)
+          (_etype1573, _size1570) = iprot.readListBegin()
+          for _i1574 in xrange(_size1570):
+            _elem1575 = iprot.readString()
+            self.success.append(_elem1575)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -42739,8 +42800,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1569 in self.success:
-        oprot.writeString(iter1569)
+      for iter1576 in self.success:
+        oprot.writeString(iter1576)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -45753,10 +45814,10 @@ def read(self, iprot):
       if fid == 0:
        if ftype == TType.LIST:
          self.success = []
-          (_etype1573, _size1570) = iprot.readListBegin()
-          for _i1574 in xrange(_size1570):
-            _elem1575 = iprot.readString()
-            self.success.append(_elem1575)
+          (_etype1580, _size1577) = iprot.readListBegin()
+          for _i1581 in xrange(_size1577):
+            _elem1582 = iprot.readString()
+            self.success.append(_elem1582)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -45773,8 +45834,8 @@ def write(self, oprot):
     if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1576 in self.success:
-        oprot.writeString(iter1576)
+      for iter1583 in self.success:
+        oprot.writeString(iter1583)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -52084,11 +52145,11 @@ def read(self, iprot):
       if fid == 0:
        if ftype == TType.LIST:
          self.success = []
-          (_etype1580, _size1577) = iprot.readListBegin()
-          for _i1581 in xrange(_size1577):
-            _elem1582 = SchemaVersion()
-            _elem1582.read(iprot)
-            self.success.append(_elem1582)
+          (_etype1587, _size1584) = iprot.readListBegin()
+          for _i1588 in xrange(_size1584):
+            _elem1589 = SchemaVersion()
+            _elem1589.read(iprot)
+            self.success.append(_elem1589)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -52117,8 +52178,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1583 in self.success:
-        iter1583.write(oprot)
+      for iter1590 in self.success:
+        iter1590.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -53593,11 +53654,11 @@ def read(self, iprot):
       if fid == 0:
        if ftype == TType.LIST:
          self.success = []
-          (_etype1587, _size1584) = iprot.readListBegin()
-          for _i1588 in xrange(_size1584):
-            _elem1589 = RuntimeStat()
-            _elem1589.read(iprot)
-            self.success.append(_elem1589)
+          (_etype1594, _size1591) = iprot.readListBegin()
+          for _i1595 in xrange(_size1591):
+            _elem1596 = RuntimeStat()
+            _elem1596.read(iprot)
+            self.success.append(_elem1596)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -53620,8 +53681,8 @@ def write(self, oprot):
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1590 in self.success:
-        iter1590.write(oprot)
+      for iter1597 in self.success:
+
iter1597.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -54420,3 +54481,135 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) + +class add_replication_metrics_args: + """ + Attributes: + - replicationMetricList + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'replicationMetricList', (ReplicationMetricList, ReplicationMetricList.thrift_spec), None, ), # 1 + ) + + def __init__(self, replicationMetricList=None,): + self.replicationMetricList = replicationMetricList + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.replicationMetricList = ReplicationMetricList() + self.replicationMetricList.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('add_replication_metrics_args') + if self.replicationMetricList is not None: + oprot.writeFieldBegin('replicationMetricList', TType.STRUCT, 1) + self.replicationMetricList.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.replicationMetricList) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class add_replication_metrics_result: + """ + Attributes: + - o1 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, o1=None,): + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('add_replication_metrics_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + 
oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 66fdd28c36..29dd5710c2 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -25629,6 +25629,205 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class ReplicationMetrics: + """ + Attributes: + - scheduledExecutionId + - policy + - dumpExecutionId + - metadata + - progress + """ + + thrift_spec = ( + None, # 0 + (1, TType.I64, 'scheduledExecutionId', None, None, ), # 1 + (2, TType.STRING, 'policy', None, None, ), # 2 + (3, TType.I64, 'dumpExecutionId', None, None, ), # 3 + (4, TType.STRING, 'metadata', None, None, ), # 4 + (5, TType.STRING, 'progress', None, None, ), # 5 + ) + + def __init__(self, scheduledExecutionId=None, policy=None, dumpExecutionId=None, metadata=None, progress=None,): + self.scheduledExecutionId = scheduledExecutionId + self.policy = policy + self.dumpExecutionId = dumpExecutionId + self.metadata = metadata + self.progress = progress + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.scheduledExecutionId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.policy = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.dumpExecutionId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.metadata = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.progress = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('ReplicationMetrics') + if self.scheduledExecutionId is not None: + oprot.writeFieldBegin('scheduledExecutionId', TType.I64, 1) + oprot.writeI64(self.scheduledExecutionId) + oprot.writeFieldEnd() + if self.policy is not None: + oprot.writeFieldBegin('policy', TType.STRING, 2) + oprot.writeString(self.policy) + oprot.writeFieldEnd() + if self.dumpExecutionId is not None: + oprot.writeFieldBegin('dumpExecutionId', TType.I64, 3) + oprot.writeI64(self.dumpExecutionId) + oprot.writeFieldEnd() + if self.metadata is 
not None: + oprot.writeFieldBegin('metadata', TType.STRING, 4) + oprot.writeString(self.metadata) + oprot.writeFieldEnd() + if self.progress is not None: + oprot.writeFieldBegin('progress', TType.STRING, 5) + oprot.writeString(self.progress) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.scheduledExecutionId is None: + raise TProtocol.TProtocolException(message='Required field scheduledExecutionId is unset!') + if self.policy is None: + raise TProtocol.TProtocolException(message='Required field policy is unset!') + if self.dumpExecutionId is None: + raise TProtocol.TProtocolException(message='Required field dumpExecutionId is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.scheduledExecutionId) + value = (value * 31) ^ hash(self.policy) + value = (value * 31) ^ hash(self.dumpExecutionId) + value = (value * 31) ^ hash(self.metadata) + value = (value * 31) ^ hash(self.progress) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class ReplicationMetricList: + """ + Attributes: + - replicationMetricList + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'replicationMetricList', (TType.STRUCT,(ReplicationMetrics, ReplicationMetrics.thrift_spec)), None, ), # 1 + ) + + def __init__(self, replicationMetricList=None,): + self.replicationMetricList = replicationMetricList + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.replicationMetricList = [] + (_etype1008, _size1005) = iprot.readListBegin() + for _i1009 in xrange(_size1005): + _elem1010 = ReplicationMetrics() + _elem1010.read(iprot) + self.replicationMetricList.append(_elem1010) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('ReplicationMetricList') + if self.replicationMetricList is not None: + oprot.writeFieldBegin('replicationMetricList', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.replicationMetricList)) + for iter1011 in self.replicationMetricList: + iter1011.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.replicationMetricList is None: + raise TProtocol.TProtocolException(message='Required field replicationMetricList is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.replicationMetricList) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % 
(self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class AlterPartitionsRequest: """ Attributes: @@ -25688,11 +25887,11 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.partitions = [] - (_etype1008, _size1005) = iprot.readListBegin() - for _i1009 in xrange(_size1005): - _elem1010 = Partition() - _elem1010.read(iprot) - self.partitions.append(_elem1010) + (_etype1015, _size1012) = iprot.readListBegin() + for _i1016 in xrange(_size1012): + _elem1017 = Partition() + _elem1017.read(iprot) + self.partitions.append(_elem1017) iprot.readListEnd() else: iprot.skip(ftype) @@ -25737,8 +25936,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter1011 in self.partitions: - iter1011.write(oprot) + for iter1018 in self.partitions: + iter1018.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environmentContext is not None: @@ -25890,10 +26089,10 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.partVals = [] - (_etype1015, _size1012) = iprot.readListBegin() - for _i1016 in xrange(_size1012): - _elem1017 = iprot.readString() - self.partVals.append(_elem1017) + (_etype1022, _size1019) = iprot.readListBegin() + for _i1023 in xrange(_size1019): + _elem1024 = iprot.readString() + self.partVals.append(_elem1024) iprot.readListEnd() else: iprot.skip(ftype) @@ -25933,8 +26132,8 @@ def write(self, oprot): if self.partVals is not None: oprot.writeFieldBegin('partVals', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partVals)) - for iter1018 in self.partVals: - oprot.writeString(iter1018) + for iter1025 in self.partVals: + oprot.writeString(iter1025) oprot.writeListEnd() oprot.writeFieldEnd() if self.newPart is not None: @@ -26114,10 +26313,10 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: self.processorCapabilities = [] - (_etype1022, _size1019) = iprot.readListBegin() - for _i1023 in xrange(_size1019): - _elem1024 = iprot.readString() - self.processorCapabilities.append(_elem1024) + (_etype1029, _size1026) = iprot.readListBegin() + for _i1030 in xrange(_size1026): + _elem1031 = iprot.readString() + self.processorCapabilities.append(_elem1031) iprot.readListEnd() else: iprot.skip(ftype) @@ -26167,8 +26366,8 @@ def write(self, oprot): if self.processorCapabilities is not None: oprot.writeFieldBegin('processorCapabilities', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) - for iter1025 in self.processorCapabilities: - oprot.writeString(iter1025) + for iter1032 in self.processorCapabilities: + oprot.writeString(iter1032) oprot.writeListEnd() oprot.writeFieldEnd() if self.processorIdentifier is not None: @@ -26290,10 +26489,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fieldList = [] - (_etype1029, _size1026) = iprot.readListBegin() - for _i1030 in xrange(_size1026): - _elem1031 = iprot.readString() - self.fieldList.append(_elem1031) + (_etype1036, _size1033) = iprot.readListBegin() + for _i1037 in xrange(_size1033): + _elem1038 = iprot.readString() + self.fieldList.append(_elem1038) iprot.readListEnd() else: iprot.skip(ftype) @@ -26320,8 +26519,8 @@ def write(self, oprot): if self.fieldList is not None: oprot.writeFieldBegin('fieldList', TType.LIST, 1) oprot.writeListBegin(TType.STRING, 
len(self.fieldList)) - for iter1032 in self.fieldList: - oprot.writeString(iter1032) + for iter1039 in self.fieldList: + oprot.writeString(iter1039) oprot.writeListEnd() oprot.writeFieldEnd() if self.includeParamKeyPattern is not None: @@ -26397,10 +26596,10 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: self.filters = [] - (_etype1036, _size1033) = iprot.readListBegin() - for _i1037 in xrange(_size1033): - _elem1038 = iprot.readString() - self.filters.append(_elem1038) + (_etype1043, _size1040) = iprot.readListBegin() + for _i1044 in xrange(_size1040): + _elem1045 = iprot.readString() + self.filters.append(_elem1045) iprot.readListEnd() else: iprot.skip(ftype) @@ -26421,8 +26620,8 @@ def write(self, oprot): if self.filters is not None: oprot.writeFieldBegin('filters', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.filters)) - for iter1039 in self.filters: - oprot.writeString(iter1039) + for iter1046 in self.filters: + oprot.writeString(iter1046) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26475,11 +26674,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitionSpec = [] - (_etype1043, _size1040) = iprot.readListBegin() - for _i1044 in xrange(_size1040): - _elem1045 = PartitionSpec() - _elem1045.read(iprot) - self.partitionSpec.append(_elem1045) + (_etype1050, _size1047) = iprot.readListBegin() + for _i1051 in xrange(_size1047): + _elem1052 = PartitionSpec() + _elem1052.read(iprot) + self.partitionSpec.append(_elem1052) iprot.readListEnd() else: iprot.skip(ftype) @@ -26496,8 +26695,8 @@ def write(self, oprot): if self.partitionSpec is not None: oprot.writeFieldBegin('partitionSpec', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitionSpec)) - for iter1046 in self.partitionSpec: - iter1046.write(oprot) + for iter1053 in self.partitionSpec: + iter1053.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26601,10 +26800,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.groupNames = [] - (_etype1050, _size1047) = iprot.readListBegin() - for _i1051 in xrange(_size1047): - _elem1052 = iprot.readString() - self.groupNames.append(_elem1052) + (_etype1057, _size1054) = iprot.readListBegin() + for _i1058 in xrange(_size1054): + _elem1059 = iprot.readString() + self.groupNames.append(_elem1059) iprot.readListEnd() else: iprot.skip(ftype) @@ -26623,10 +26822,10 @@ def read(self, iprot): elif fid == 9: if ftype == TType.LIST: self.processorCapabilities = [] - (_etype1056, _size1053) = iprot.readListBegin() - for _i1057 in xrange(_size1053): - _elem1058 = iprot.readString() - self.processorCapabilities.append(_elem1058) + (_etype1063, _size1060) = iprot.readListBegin() + for _i1064 in xrange(_size1060): + _elem1065 = iprot.readString() + self.processorCapabilities.append(_elem1065) iprot.readListEnd() else: iprot.skip(ftype) @@ -26668,8 +26867,8 @@ def write(self, oprot): if self.groupNames is not None: oprot.writeFieldBegin('groupNames', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.groupNames)) - for iter1059 in self.groupNames: - oprot.writeString(iter1059) + for iter1066 in self.groupNames: + oprot.writeString(iter1066) oprot.writeListEnd() oprot.writeFieldEnd() if self.projectionSpec is not None: @@ -26683,8 +26882,8 @@ def write(self, oprot): if self.processorCapabilities is not None: oprot.writeFieldBegin('processorCapabilities', TType.LIST, 9) oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) - for iter1060 in 
self.processorCapabilities: - oprot.writeString(iter1060) + for iter1067 in self.processorCapabilities: + oprot.writeString(iter1067) oprot.writeListEnd() oprot.writeFieldEnd() if self.processorIdentifier is not None: diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb index 3c8152272b..06653d06d1 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -5725,6 +5725,50 @@ class ScheduledQueryProgressInfo ::Thrift::Struct.generate_accessors self end +class ReplicationMetrics + include ::Thrift::Struct, ::Thrift::Struct_Union + SCHEDULEDEXECUTIONID = 1 + POLICY = 2 + DUMPEXECUTIONID = 3 + METADATA = 4 + PROGRESS = 5 + + FIELDS = { + SCHEDULEDEXECUTIONID => {:type => ::Thrift::Types::I64, :name => 'scheduledExecutionId'}, + POLICY => {:type => ::Thrift::Types::STRING, :name => 'policy'}, + DUMPEXECUTIONID => {:type => ::Thrift::Types::I64, :name => 'dumpExecutionId'}, + METADATA => {:type => ::Thrift::Types::STRING, :name => 'metadata', :optional => true}, + PROGRESS => {:type => ::Thrift::Types::STRING, :name => 'progress', :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field scheduledExecutionId is unset!') unless @scheduledExecutionId + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field policy is unset!') unless @policy + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dumpExecutionId is unset!') unless @dumpExecutionId + end + + ::Thrift::Struct.generate_accessors self +end + +class ReplicationMetricList + include ::Thrift::Struct, ::Thrift::Struct_Union + REPLICATIONMETRICLIST = 1 + + FIELDS = { + REPLICATIONMETRICLIST => {:type => ::Thrift::Types::LIST, :name => 'replicationMetricList', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ReplicationMetrics}} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field replicationMetricList is unset!') unless @replicationMetricList + end + + ::Thrift::Struct.generate_accessors self +end + class AlterPartitionsRequest include ::Thrift::Struct, ::Thrift::Struct_Union CATNAME = 1 diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index ebb22c0646..53fa416415 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -3881,6 +3881,21 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_scheduled_query failed: unknown result') end + def add_replication_metrics(replicationMetricList) + send_add_replication_metrics(replicationMetricList) + recv_add_replication_metrics() + end + + def send_add_replication_metrics(replicationMetricList) + send_message('add_replication_metrics', Add_replication_metrics_args, :replicationMetricList => replicationMetricList) + end + + def recv_add_replication_metrics() + result = receive_message(Add_replication_metrics_result) + 
raise result.o1 unless result.o1.nil?
+      return
+    end
+
   end

   class Processor < ::FacebookService::Processor
@@ -6789,6 +6804,17 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'get_scheduled_query', seqid)
     end

+    def process_add_replication_metrics(seqid, iprot, oprot)
+      args = read_args(iprot, Add_replication_metrics_args)
+      result = Add_replication_metrics_result.new()
+      begin
+        @handler.add_replication_metrics(args.replicationMetricList)
+      rescue ::MetaException => o1
+        result.o1 = o1
+      end
+      write_result(result, oprot, 'add_replication_metrics', seqid)
+    end
+
   end

   # HELPER FUNCTIONS AND STRUCTURES

@@ -15369,5 +15395,37 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end

+  class Add_replication_metrics_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    REPLICATIONMETRICLIST = 1
+
+    FIELDS = {
+      REPLICATIONMETRICLIST => {:type => ::Thrift::Types::STRUCT, :name => 'replicationMetricList', :class => ::ReplicationMetricList}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Add_replication_metrics_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    O1 = 1
+
+    FIELDS = {
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
 end
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index c120ebc7e9..c0a9d1e31d 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -4175,6 +4175,11 @@ public void scheduledQueryMaintenance(ScheduledQueryMaintenanceRequest request)
     client.scheduled_query_maintenance(request);
   }

+  @Override
+  public void addReplicationMetrics(ReplicationMetricList replicationMetricList) throws MetaException, TException {
+    client.add_replication_metrics(replicationMetricList);
+  }
+
   /**
    * Builder for requiredFields bitmask to be sent via GetTablesExtRequest
    */
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 23071643a0..b66e837dfa 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -3983,4 +3983,11 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam
    */
   void scheduledQueryProgress(ScheduledQueryProgressInfo info) throws TException;

+  /**
+   * Adds replication metrics for the replication policies.
+   * @param replicationMetricList the metrics to persist, one entry per scheduled execution
+   * @throws MetaException
+   */
+  void addReplicationMetrics(ReplicationMetricList replicationMetricList) throws MetaException, TException;
+
 }
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index d1db106270..db1017beda 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -963,6 +963,17 @@ public static ConfVars getMetaConf(String name) {
         "hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/,
         "Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories." +
             "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."),
+    REPL_METRICS_CACHE_MAXSIZE("metastore.repl.metrics.cache.maxsize",
+        "hive.repl.metrics.cache.maxsize", 10000 /*10000 rows */,
+        "Maximum number of replication metrics rows kept in the in-memory cache. Metrics are pushed to" +
+            " persistent storage at the frequency set by hive.repl.metrics.update.frequency; until persisted," +
+            " they are held in this cache, so size it for the number of concurrently running policies and" +
+            " the persist frequency."
+    ),
+    REPL_METRICS_UPDATE_FREQUENCY("metastore.repl.metrics.update.frequency",
+        "hive.repl.metrics.update.frequency", 1 /*1 minute */,
+        "Frequency, in minutes, at which replication metrics in the cache are written to persistent storage."
+    ),
     SCHEMA_INFO_CLASS("metastore.schema.info.class", "hive.metastore.schema.info.class",
         "org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo",
         "Fully qualified class name for the metastore schema information class \n"
diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index c63468304d..c1b58b8bdb 100644
--- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -1839,6 +1839,19 @@ struct ScheduledQueryProgressInfo{
   4: optional string errorMessage,
 }

+struct ReplicationMetrics{
+  1: required i64 scheduledExecutionId,
+  2: required string policy,
+  3: required i64 dumpExecutionId,
+  4: optional string metadata,
+  5: optional string progress,
+}
+
+struct ReplicationMetricList{
+  1: required list<ReplicationMetrics> replicationMetricList,
+}
+
+
 struct AlterPartitionsRequest {
   1: optional string catName,
   2: required string dbName,
@@ -2656,6 +2669,8 @@ service ThriftHiveMetastore extends fb303.FacebookService
   void scheduled_query_maintenance(1: ScheduledQueryMaintenanceRequest request) throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:AlreadyExistsException o3, 4:InvalidInputException o4)
   void scheduled_query_progress(1: ScheduledQueryProgressInfo info) throws(1:MetaException o1, 2: InvalidOperationException o2)
   ScheduledQuery get_scheduled_query(1: ScheduledQueryKey scheduleKey) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+  void add_replication_metrics(1: ReplicationMetricList replicationMetricList) throws(1:MetaException o1)
 }

 // * Note about the DDL_TIME: When creating or altering a table or a partition,
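Taken together, the Thrift RPC and the client plumbing above give callers a single entry point for publishing metrics. A minimal usage sketch, not part of the patch: the execution ids, policy name, and JSON payloads below are invented for illustration, and it assumes a reachable metastore configured through MetastoreConf.newMetastoreConf().

import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ReplicationMetricList;
import org.apache.hadoop.hive.metastore.api.ReplicationMetrics;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

public class ReplicationMetricsPublisher {
  public static void main(String[] args) throws Exception {
    // One ReplicationMetrics entry per scheduled execution of a policy.
    ReplicationMetrics metric = new ReplicationMetrics();
    metric.setScheduledExecutionId(42L);                    // required field; example value
    metric.setPolicy("minute_policy");                      // required field; example value
    metric.setDumpExecutionId(7L);                          // required field; example value
    metric.setMetadata("{\"dumpDir\":\"/staging/repl\"}");  // optional; opaque string to the metastore
    metric.setProgress("{\"stage\":\"REPL_DUMP\"}");        // optional; opaque string to the metastore

    ReplicationMetricList batch = new ReplicationMetricList();
    batch.setReplicationMetricList(Collections.singletonList(metric));

    // Connects to the metastore named by the usual MetastoreConf settings.
    Configuration conf = MetastoreConf.newMetastoreConf();
    IMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // Maps 1:1 onto the new add_replication_metrics Thrift call.
      client.addReplicationMetrics(batch);
    } finally {
      client.close();
    }
  }
}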
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 6d68035bb2..6a11c64934 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -9893,6 +9893,21 @@ public ScheduledQuery get_scheduled_query(ScheduledQueryKey scheduleKey) throws
         endFunction("get_scheduled_query", ex == null, ex);
       }
     }
+
+    @Override
+    public void add_replication_metrics(ReplicationMetricList replicationMetricList) throws MetaException {
+      startFunction("add_replication_metrics");
+      Exception ex = null;
+      try {
+        getMS().addReplicationMetric(replicationMetricList);
+      } catch (Exception e) {
+        LOG.error("Caught exception", e);
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("add_replication_metrics", ex == null, ex);
+      }
+    }
   }

   private static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, Configuration conf)
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 8fd49827b9..0614bda602 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -78,141 +78,14 @@
 import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.FunctionType;
-import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec;
-import org.apache.hadoop.hive.metastore.api.GetPartitionsProjectionSpec;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.HiveObjectType;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionFilterMode;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesRow;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
-import org.apache.hadoop.hive.metastore.api.QueryState;
-import org.apache.hadoop.hive.metastore.api.ResourceType;
-import org.apache.hadoop.hive.metastore.api.ResourceUri;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.ScheduledQuery;
-import org.apache.hadoop.hive.metastore.api.ScheduledQueryKey;
-import org.apache.hadoop.hive.metastore.api.ScheduledQueryMaintenanceRequest;
-import org.apache.hadoop.hive.metastore.api.ScheduledQueryPollRequest;
-import org.apache.hadoop.hive.metastore.api.ScheduledQueryPollResponse;
-import org.apache.hadoop.hive.metastore.api.ScheduledQueryProgressInfo;
-import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
-import org.apache.hadoop.hive.metastore.api.SchemaType;
-import org.apache.hadoop.hive.metastore.api.SchemaValidation;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.SerdeType;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMPool;
-import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
-import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
+import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
 import org.apache.hadoop.hive.metastore.metrics.Metrics;
 import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
-import org.apache.hadoop.hive.metastore.model.FetchGroups;
-import org.apache.hadoop.hive.metastore.model.MCatalog;
-import org.apache.hadoop.hive.metastore.model.MColumnDescriptor;
-import org.apache.hadoop.hive.metastore.model.MConstraint;
-import org.apache.hadoop.hive.metastore.model.MCreationMetadata;
-import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
-import org.apache.hadoop.hive.metastore.model.MDatabase;
-import org.apache.hadoop.hive.metastore.model.MDelegationToken;
-import org.apache.hadoop.hive.metastore.model.MFieldSchema;
-import org.apache.hadoop.hive.metastore.model.MFunction;
-import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
-import org.apache.hadoop.hive.metastore.model.MISchema;
-import org.apache.hadoop.hive.metastore.model.MMasterKey;
-import org.apache.hadoop.hive.metastore.model.MMetastoreDBProperties;
-import org.apache.hadoop.hive.metastore.model.MNotificationLog;
-import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
-import org.apache.hadoop.hive.metastore.model.MOrder;
-import org.apache.hadoop.hive.metastore.model.MPartition;
-import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
-import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics;
-import org.apache.hadoop.hive.metastore.model.MPartitionEvent;
-import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
-import org.apache.hadoop.hive.metastore.model.MResourceUri;
-import org.apache.hadoop.hive.metastore.model.MRole;
-import org.apache.hadoop.hive.metastore.model.MRoleMap;
-import org.apache.hadoop.hive.metastore.model.MRuntimeStat;
-import org.apache.hadoop.hive.metastore.model.MScheduledExecution;
-import org.apache.hadoop.hive.metastore.model.MScheduledQuery;
-import org.apache.hadoop.hive.metastore.model.MSchemaVersion;
-import org.apache.hadoop.hive.metastore.model.MSerDeInfo;
-import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
-import org.apache.hadoop.hive.metastore.model.MStringList;
-import org.apache.hadoop.hive.metastore.model.MTable;
-import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
-import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics;
-import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
-import org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog;
-import org.apache.hadoop.hive.metastore.model.MType;
-import org.apache.hadoop.hive.metastore.model.MVersionTable;
-import org.apache.hadoop.hive.metastore.model.MWMMapping;
+import org.apache.hadoop.hive.metastore.model.*;
 import org.apache.hadoop.hive.metastore.model.MWMMapping.EntityType;
-import org.apache.hadoop.hive.metastore.model.MWMPool;
-import org.apache.hadoop.hive.metastore.model.MWMResourcePlan;
 import org.apache.hadoop.hive.metastore.model.MWMResourcePlan.Status;
-import org.apache.hadoop.hive.metastore.model.MWMTrigger;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
@@ -13100,6 +12973,37 @@ public void scheduledQueryProgress(ScheduledQueryProgressInfo info) throws Inval
     }
   }

+  @Override
+  public void addReplicationMetric(ReplicationMetricList replicationMetricList) {
+    boolean commited = false;
+    try {
+      openTransaction();
+      List<MReplicationMetrics> mReplicationMetricsList = new ArrayList<>();
+      for (ReplicationMetrics replicationMetric : replicationMetricList.getReplicationMetricList()) {
+        MReplicationMetrics mReplicationMetrics;
+        try {
+          mReplicationMetrics = pm.getObjectById(MReplicationMetrics.class,
+              replicationMetric.getScheduledExecutionId());
+        } catch (javax.jdo.JDOObjectNotFoundException e) {
+          // getObjectById throws rather than returning null when no row exists yet.
+          mReplicationMetrics = new MReplicationMetrics();
+          mReplicationMetrics.setDumpExecutionId(replicationMetric.getDumpExecutionId());
+          mReplicationMetrics.setScheduledExecutionId(replicationMetric.getScheduledExecutionId());
+          mReplicationMetrics.setPolicy(replicationMetric.getPolicy());
+        }
+        mReplicationMetrics.setMetadata(replicationMetric.getMetadata());
+        mReplicationMetrics.setProgress(replicationMetric.getProgress());
+        mReplicationMetricsList.add(mReplicationMetrics);
+      }
+      pm.makePersistentAll(mReplicationMetricsList);
+      commited = commitTransaction();
+    } finally {
+      if (!commited) {
+        rollbackTransaction();
+      }
+    }
+  }
+
   private void ensureScheduledQueriesEnabled() throws MetaException {
     if (!MetastoreConf.getBoolVar(conf, ConfVars.SCHEDULED_QUERIES_ENABLED)) {
       throw new MetaException(
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 8b66a46b6f..d0bcf6e1df 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -36,7 +36,6 @@ import org.apache.thrift.TException;

 public interface RawStore extends Configurable {
-
   /***
    * Annotation to skip retries
    */
@@ -1843,6 +1842,12 @@ void scheduledQueryMaintenance(ScheduledQueryMaintenanceRequest request)
   void scheduledQueryProgress(ScheduledQueryProgressInfo info)
       throws MetaException, NoSuchObjectException, InvalidOperationException;

+  /**
+   * Add the replication metrics and progress info.
+   * @param replicationMetricList the metrics to persist
+   */
+  void addReplicationMetric(ReplicationMetricList replicationMetricList);
+
   int deleteScheduledExecutions(int maxRetainSecs);

   int markScheduledExecutionsTimedOut(int timeoutSecs) throws InvalidOperationException, MetaException;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 8d28c1fd5a..9648c83ac5 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -2889,6 +2889,11 @@ public void scheduledQueryProgress(ScheduledQueryProgressInfo info)
     rawStore.scheduledQueryProgress(info);
   }

+  @Override
+  public void addReplicationMetric(ReplicationMetricList replicationMetricList) {
+    rawStore.addReplicationMetric(replicationMetricList);
+  }
+
   @Override
   public ScheduledQuery getScheduledQuery(ScheduledQueryKey scheduleKey) throws MetaException, NoSuchObjectException {
     return rawStore.getScheduledQuery(scheduleKey);
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MReplicationMetrics.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MReplicationMetrics.java
new file mode 100644
index 0000000000..56c493cd0b
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MReplicationMetrics.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.model;
+
+/**
+ * Describes Replication Metrics.
+ */
+public class MReplicationMetrics {
+  private long scheduledExecutionId;
+  private String policy;
+  private long dumpExecutionId;
+  private String metadata;
+  private String progress;
+
+  public MReplicationMetrics() {
+  }
+
+  public long getScheduledExecutionId() {
+    return scheduledExecutionId;
+  }
+
+  public void setScheduledExecutionId(long scheduledExecutionId) {
+    this.scheduledExecutionId = scheduledExecutionId;
+  }
+
+  public String getPolicy() {
+    return policy;
+  }
+
+  public void setPolicy(String policy) {
+    this.policy = policy;
+  }
+
+  public long getDumpExecutionId() {
+    return dumpExecutionId;
+  }
+
+  public void setDumpExecutionId(long dumpExecutionId) {
+    this.dumpExecutionId = dumpExecutionId;
+  }
+
+  public String getMetadata() {
+    return metadata;
+  }
+
+  public void setMetadata(String metadata) {
+    this.metadata = metadata;
+  }
+
+  public String getProgress() {
+    return progress;
+  }
+
+  public void setProgress(String progress) {
+    this.progress = progress;
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/resources/package.jdo b/standalone-metastore/metastore-server/src/main/resources/package.jdo
index b85631603e..539f302119 100644
--- a/standalone-metastore/metastore-server/src/main/resources/package.jdo
+++ b/standalone-metastore/metastore-server/src/main/resources/package.jdo
@@ -1523,6 +1523,29 @@
+    <class name="MReplicationMetrics" identity-type="application" table="REPLICATION_METRICS" detachable="true">
+      <field name="scheduledExecutionId" primary-key="true">
+        <column name="RM_SCHID" jdbc-type="BIGINT" allows-null="false"/>
+      </field>
+      <field name="policy">
+        <column name="RM_POLICY" jdbc-type="varchar" length="256" allows-null="false"/>
+      </field>
+      <field name="dumpExecutionId">
+        <column name="RM_DUMPSCHID" jdbc-type="BIGINT" allows-null="false"/>
+      </field>
+      <field name="metadata">
+        <column name="RM_META" jdbc-type="varchar" length="4000" allows-null="true"/>
+      </field>
+      <field name="progress">
+        <column name="RM_PROGRESS" jdbc-type="varchar" length="4000" allows-null="true"/>
+      </field>
+    </class>
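ObjectStore, earlier in this patch, copies fields between the Thrift ReplicationMetrics struct and the MReplicationMetrics model by hand. If that mapping is ever wanted in one place, a small converter is enough. This is a sketch only; the class and method names below are invented for illustration and are not part of the patch.

package org.apache.hadoop.hive.metastore.model;

import org.apache.hadoop.hive.metastore.api.ReplicationMetrics;

/** Illustrative Thrift to JDO mapping for MReplicationMetrics; not in the patch. */
public final class ReplicationMetricsConverter {

  private ReplicationMetricsConverter() {
  }

  public static MReplicationMetrics fromThrift(ReplicationMetrics metrics) {
    MReplicationMetrics m = new MReplicationMetrics();
    m.setScheduledExecutionId(metrics.getScheduledExecutionId());
    m.setPolicy(metrics.getPolicy());
    m.setDumpExecutionId(metrics.getDumpExecutionId());
    m.setMetadata(metrics.getMetadata());
    m.setProgress(metrics.getProgress());
    return m;
  }

  public static ReplicationMetrics toThrift(MReplicationMetrics m) {
    ReplicationMetrics metrics = new ReplicationMetrics();
    metrics.setScheduledExecutionId(m.getScheduledExecutionId());
    metrics.setPolicy(m.getPolicy());
    metrics.setDumpExecutionId(m.getDumpExecutionId());
    metrics.setMetadata(m.getMetadata());
    metrics.setProgress(m.getProgress());
    return metrics;
  }
}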
diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
index d548c9094f..83d998463b 100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@ -770,6 +770,19 @@ CREATE INDEX LASTUPDATETIMEINDEX ON APP.SCHEDULED_EXECUTIONS (LAST_UPDATE_TIME);
 CREATE INDEX SCHEDULED_EXECUTIONS_SCHQID ON APP.SCHEDULED_EXECUTIONS (SCHEDULED_QUERY_ID);
 CREATE UNIQUE INDEX SCHEDULED_EXECUTIONS_UNIQUE_ID ON APP.SCHEDULED_EXECUTIONS (SCHEDULED_EXECUTION_ID);

+--HIVE-23516
+CREATE TABLE "APP"."REPLICATION_METRICS" (
+  "RM_SCHID" bigint NOT NULL,
+  "RM_POLICY" varchar(256) NOT NULL,
+  "RM_DUMPSCHID" bigint NOT NULL,
+  "RM_META" varchar(4000),
+  "RM_PROGRESS" varchar(4000),
+  PRIMARY KEY("RM_SCHID")
+);
+
+CREATE INDEX "POLICY_IDX" ON "APP"."REPLICATION_METRICS" ("RM_POLICY");
+CREATE INDEX "DUMP_IDX" ON "APP"."REPLICATION_METRICS" ("RM_DUMPSCHID");
+
 -- -----------------------------------------------------------------
 -- Record schema version. Should be the last step in the init script
diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
index db6cebc277..1619d86016 100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
@@ -83,5 +83,18 @@ ALTER TABLE TXNS DROP COLUMN TXN_ID_TMP;
 RENAME TABLE NEXT_TXN_ID TO TXN_LOCK_TBL;
 RENAME COLUMN TXN_LOCK_TBL.NTXN_NEXT TO TXN_LOCK;

+--HIVE-23516
+CREATE TABLE "APP"."REPLICATION_METRICS" (
+  "RM_SCHID" bigint NOT NULL,
+  "RM_POLICY" varchar(256) NOT NULL,
+  "RM_DUMPSCHID" bigint NOT NULL,
+  "RM_META" varchar(4000),
+  "RM_PROGRESS" varchar(4000),
+  PRIMARY KEY("RM_SCHID")
+);
+
+CREATE INDEX "POLICY_IDX" ON "APP"."REPLICATION_METRICS" ("RM_POLICY");
+CREATE INDEX "DUMP_IDX" ON "APP"."REPLICATION_METRICS" ("RM_DUMPSCHID");
+
 -- This needs to be the last thing done.  Insert any changes above this line.
 UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
index 6906bdf6b9..d9d47a8b28 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
@@ -1318,6 +1318,20 @@ CREATE TABLE "SCHEDULED_EXECUTIONS" (
 CREATE INDEX IDX_SCHEDULED_EX_LAST_UPDATE ON "SCHEDULED_EXECUTIONS" ("LAST_UPDATE_TIME");
 CREATE INDEX IDX_SCHEDULED_EX_SQ_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_QUERY_ID");

+--HIVE-23516
+CREATE TABLE "REPLICATION_METRICS" (
+  "RM_SCHID" bigint NOT NULL,
+  "RM_POLICY" varchar(256) NOT NULL,
+  "RM_DUMPSCHID" bigint NOT NULL,
+  "RM_META" varchar(4000),
+  "RM_PROGRESS" varchar(4000),
+  PRIMARY KEY("RM_SCHID")
+);
+
+--Create indexes for the replication metrics table
+CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY");
+CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMPSCHID");
+
 -- -----------------------------------------------------------------
 -- Record schema version. Should be the last step in the init script
 -- -----------------------------------------------------------------
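POLICY_IDX and DUMP_IDX exist to serve lookups by policy name and by dump execution id. The patch only adds the write path, but a read helper would lean on exactly those columns; a hedged JDOQL sketch (the class and method are hypothetical, and a real RawStore method would run inside open/commitTransaction the way ObjectStore does):

import java.util.List;

import javax.jdo.PersistenceManager;
import javax.jdo.Query;

import org.apache.hadoop.hive.metastore.model.MReplicationMetrics;

public class ReplicationMetricsQueries {

  /**
   * Fetches all metric rows recorded for one replication policy. The JDOQL
   * filter on the 'policy' field translates to a WHERE clause on RM_POLICY,
   * which is the access path POLICY_IDX is meant to serve.
   */
  @SuppressWarnings("unchecked")
  public static List<MReplicationMetrics> byPolicy(PersistenceManager pm, String policyName) {
    Query query = pm.newQuery(MReplicationMetrics.class, "policy == policyName");
    query.declareParameters("java.lang.String policyName");
    return (List<MReplicationMetrics>) query.execute(policyName);
  }
}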
diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
index 77098142d1..5ff1b9c2cc 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
@@ -119,6 +119,20 @@ Exec sp_rename 'TXN_LOCK_TBL.NTXN_NEXT', 'TXN_LOCK', 'COLUMN';
 ALTER TABLE TXN_COMPONENTS WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID);
 CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);

+--HIVE-23516
+CREATE TABLE "REPLICATION_METRICS" (
+  "RM_SCHID" bigint NOT NULL,
+  "RM_POLICY" varchar(256) NOT NULL,
+  "RM_DUMPSCHID" bigint NOT NULL,
+  "RM_META" varchar(4000),
+  "RM_PROGRESS" varchar(4000),
+  PRIMARY KEY("RM_SCHID")
+);
+
+--Create indexes for the replication metrics table
+CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY");
+CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMPSCHID");
+
 -- These lines need to be last.  Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE;
diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
index b7f423c326..d7fcd49cfc 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
@@ -1242,6 +1242,20 @@ CREATE INDEX IDX_SCHEDULED_EXECUTIONS_LAST_UPDATE_TIME ON SCHEDULED_EXECUTIONS (
 CREATE INDEX IDX_SCHEDULED_EXECUTIONS_SCHEDULED_QUERY_ID ON SCHEDULED_EXECUTIONS (SCHEDULED_QUERY_ID);
 CREATE UNIQUE INDEX UNIQUE_SCHEDULED_EXECUTIONS_ID ON SCHEDULED_EXECUTIONS (SCHEDULED_EXECUTION_ID);

+-- Create table replication metrics
+CREATE TABLE REPLICATION_METRICS (
+  RM_SCHID bigint NOT NULL,
+  RM_POLICY varchar(256) NOT NULL,
+  RM_DUMPSCHID bigint NOT NULL,
+  RM_META varchar(4000),
+  RM_PROGRESS varchar(4000),
+  PRIMARY KEY(RM_SCHID)
+);
+
+-- Create indexes for the replication metrics table
+CREATE INDEX POLICY_IDX ON REPLICATION_METRICS (RM_POLICY);
+CREATE INDEX DUMP_IDX ON REPLICATION_METRICS (RM_DUMPSCHID);
+
 -- -----------------------------------------------------------------
 -- Record schema version. Should be the last step in the init script
diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
index 02920666f8..1a3bebd949 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
@@ -90,6 +90,20 @@ DEALLOCATE PREPARE stmt;
 RENAME TABLE NEXT_TXN_ID TO TXN_LOCK_TBL;
 ALTER TABLE TXN_LOCK_TBL RENAME COLUMN NTXN_NEXT TO TXN_LOCK;
 
+-- Create table replication metrics
+CREATE TABLE REPLICATION_METRICS (
+ RM_SCHID bigint NOT NULL,
+ RM_POLICY varchar(256) NOT NULL,
+ RM_DUMPSCHID bigint NOT NULL,
+ RM_META varchar(4000),
+ RM_PROGRESS varchar(4000),
+ PRIMARY KEY(RM_SCHID)
+);
+
+-- Create indexes for the replication metrics table
+CREATE INDEX POLICY_IDX ON REPLICATION_METRICS (RM_POLICY);
+CREATE INDEX DUMP_IDX ON REPLICATION_METRICS (RM_DUMPSCHID);
+
 -- These lines need to be last. Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE;
diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
index 0082dcd06c..1b2145d424 100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
@@ -1216,6 +1216,20 @@ CREATE TABLE "SCHEDULED_EXECUTIONS" (
 CREATE INDEX IDX_SCHEDULED_EX_LAST_UPDATE ON "SCHEDULED_EXECUTIONS" ("LAST_UPDATE_TIME");
 CREATE INDEX IDX_SCHEDULED_EX_SQ_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_QUERY_ID");
 
+--Create table replication metrics
+CREATE TABLE "REPLICATION_METRICS" (
+ "RM_SCHID" number NOT NULL,
+ "RM_POLICY" varchar2(256) NOT NULL,
+ "RM_DUMPSCHID" number NOT NULL,
+ "RM_META" varchar2(4000),
+ "RM_PROGRESS" varchar2(4000),
+ PRIMARY KEY("RM_SCHID")
+);
+
+--Create indexes for the replication metrics table
+CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY");
+CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMPSCHID");
+
 -- -----------------------------------------------------------------
 -- Record schema version. Should be the last step in the init script
 -- -----------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
index fb28290156..1064b5d572 100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
@@ -88,6 +88,20 @@ ALTER TABLE TXNS MODIFY TXN_ID default TXNS_TXN_ID_SEQ.nextval;
 RENAME TABLE NEXT_TXN_ID TO TXN_LOCK_TBL;
 ALTER TABLE TXN_LOCK_TBL RENAME COLUMN NTXN_NEXT TO TXN_LOCK;
 
+--Create table replication metrics
+CREATE TABLE "REPLICATION_METRICS" (
+ "RM_SCHID" number NOT NULL,
+ "RM_POLICY" varchar2(256) NOT NULL,
+ "RM_DUMPSCHID" number NOT NULL,
+ "RM_META" varchar2(4000),
+ "RM_PROGRESS" varchar2(4000),
+ PRIMARY KEY("RM_SCHID")
+);
+
+--Create indexes for the replication metrics table
+CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY");
+CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMPSCHID");
+
 -- These lines need to be last. Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS Status from dual;
diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
index 717e707407..2fa220f4d1 100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@ -1904,6 +1904,20 @@ CREATE INDEX IDX_SCHEDULED_EXECUTIONS_LAST_UPDATE_TIME ON "SCHEDULED_EXECUTIONS"
 CREATE INDEX IDX_SCHEDULED_EXECUTIONS_SCHEDULED_QUERY_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_QUERY_ID");
 CREATE UNIQUE INDEX UNIQUE_SCHEDULED_EXECUTIONS_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_EXECUTION_ID");
 
+--Create table replication metrics
+CREATE TABLE "REPLICATION_METRICS" (
+ "RM_SCHID" bigint NOT NULL,
+ "RM_POLICY" varchar(256) NOT NULL,
+ "RM_DUMPSCHID" bigint NOT NULL,
+ "RM_META" varchar(4000),
+ "RM_PROGRESS" varchar(4000),
+ PRIMARY KEY("RM_SCHID")
+);
+
+--Create indexes for the replication metrics table
+CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY");
+CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMPSCHID");
+
 -- -----------------------------------------------------------------
 -- Record schema version. Should be the last step in the init script
diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
index ca799bae08..8dc7e23fda 100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
@@ -213,6 +213,21 @@ ALTER TABLE "TXNS" ALTER "TXN_ID" SET DEFAULT nextval('"TXNS_TXN_ID_SEQ"');
 ALTER TABLE "NEXT_TXN_ID" RENAME TO "TXN_LOCK_TBL";
 ALTER TABLE "TXN_LOCK_TBL" RENAME COLUMN "NTXN_NEXT" TO "TXN_LOCK";
 
+
+--Create table replication metrics
+CREATE TABLE "REPLICATION_METRICS" (
+ "RM_SCHID" bigint NOT NULL,
+ "RM_POLICY" varchar(256) NOT NULL,
+ "RM_DUMPSCHID" bigint NOT NULL,
+ "RM_META" varchar(4000),
+ "RM_PROGRESS" varchar(4000),
+ PRIMARY KEY("RM_SCHID")
+);
+
+--Create indexes for the replication metrics table
+CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY");
+CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMPSCHID");
+
 -- These lines need to be last. Insert any changes above.
 UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1;
 SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0';
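The Java changes below wire the new metrics API through the metastore test doubles and the pre-catalog client. For orientation, a sketch of how a caller might assemble the ReplicationMetricList Thrift payload; the ReplicationMetrics setter names are inferred from the RM_* columns above and are assumptions about the generated Thrift code:

import java.util.Collections;
import org.apache.hadoop.hive.metastore.api.ReplicationMetricList;
import org.apache.hadoop.hive.metastore.api.ReplicationMetrics;

// Hypothetical helper: builds a one-element metric batch for a dump cycle.
public class ReplicationMetricsExample {
  static ReplicationMetricList singleMetric(long scheduledExecutionId, String policy,
      long dumpExecutionId, String progressJson) {
    ReplicationMetrics metric = new ReplicationMetrics();
    metric.setScheduledExecutionId(scheduledExecutionId); // maps to RM_SCHID
    metric.setPolicy(policy);                             // maps to RM_POLICY
    metric.setDumpExecutionId(dumpExecutionId);           // maps to RM_DUMPSCHID
    metric.setProgress(progressJson);                     // maps to RM_PROGRESS
    ReplicationMetricList list = new ReplicationMetricList();
    list.setReplicationMetricList(Collections.singletonList(metric));
    return list;
  }
}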
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index e31dc064c1..e35de4975f 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -19,13 +19,7 @@
 package org.apache.hadoop.hive.metastore;
 
 import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec;
-import org.apache.hadoop.hive.metastore.api.GetPartitionsProjectionSpec;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+import org.apache.hadoop.hive.metastore.api.*;
 
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -35,65 +29,6 @@
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.ScheduledQuery;
-import org.apache.hadoop.hive.metastore.api.ScheduledQueryKey;
-import org.apache.hadoop.hive.metastore.api.ScheduledQueryMaintenanceRequest;
-import org.apache.hadoop.hive.metastore.api.ScheduledQueryPollRequest;
-import org.apache.hadoop.hive.metastore.api.ScheduledQueryPollResponse;
-import org.apache.hadoop.hive.metastore.api.ScheduledQueryProgressInfo;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMPool;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
-import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.ColStatsObjWithSourceInfo;
 import org.apache.thrift.TException;
@@ -1326,6 +1261,11 @@ public void scheduledQueryProgress(ScheduledQueryProgressInfo info) throws Inval
     objectStore.scheduledQueryProgress(info);
   }
 
+  @Override
+  public void addReplicationMetric(ReplicationMetricList replicationMetricList) {
+    objectStore.addReplicationMetric(replicationMetricList);
+  }
+
   @Override
   public ScheduledQuery getScheduledQuery(ScheduledQueryKey scheduleKey) throws NoSuchObjectException {
     return objectStore.getScheduledQuery(scheduleKey);
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 89acdcc55b..181c84b215 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -19,13 +19,7 @@
 package org.apache.hadoop.hive.metastore;
 
 import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec;
-import org.apache.hadoop.hive.metastore.api.GetPartitionsProjectionSpec;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+import org.apache.hadoop.hive.metastore.api.*;
 
 import java.nio.ByteBuffer;
 import java.util.Collections;
@@ -33,65 +27,6 @@
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.ScheduledQuery;
-import org.apache.hadoop.hive.metastore.api.ScheduledQueryKey;
-import org.apache.hadoop.hive.metastore.api.ScheduledQueryMaintenanceRequest;
-import org.apache.hadoop.hive.metastore.api.ScheduledQueryPollRequest;
-import org.apache.hadoop.hive.metastore.api.ScheduledQueryPollResponse;
-import org.apache.hadoop.hive.metastore.api.ScheduledQueryProgressInfo;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMPool;
-import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
@@ -1309,6 +1244,11 @@ public void scheduledQueryMaintenance(ScheduledQueryMaintenanceRequest request)
   public void scheduledQueryProgress(ScheduledQueryProgressInfo info) {
   }
 
+  @Override
+  public void addReplicationMetric(ReplicationMetricList replicationMetricList) {
+
+  }
+
   @Override
   public ScheduledQuery getScheduledQuery(ScheduledQueryKey scheduleKey) {
     throw new RuntimeException("unimplemented");
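Both dummy stores override addReplicationMetric, which implies a matching declaration on the RawStore interface that this excerpt does not show. A sketch of the presumed shape (the interface name here is a stand-in, and the javadoc wording is assumed):

import org.apache.hadoop.hive.metastore.api.ReplicationMetricList;

// Presumed shape of the RawStore addition backing the overrides above;
// the overrides declare no checked exceptions, so none are sketched here.
interface ReplicationMetricsStore {
  /**
   * Persists a batch of replication metrics for scheduled replication policies.
   *
   * @param replicationMetricList metrics destined for the REPLICATION_METRICS table
   */
  void addReplicationMetric(ReplicationMetricList replicationMetricList);
}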
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 8f5bde1895..2adf11d103 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -3740,6 +3740,11 @@ public void scheduledQueryProgress(ScheduledQueryProgressInfo info) throws TExce
     client.scheduled_query_progress(info);
   }
 
+  @Override
+  public void addReplicationMetrics(ReplicationMetricList replicationMetricList) throws MetaException, TException {
+    client.add_replication_metrics(replicationMetricList);
+  }
+
   @Override
   public ScheduledQueryPollResponse scheduledQueryPoll(ScheduledQueryPollRequest request) throws MetaException, TException {